196de2506SJakub Kicinski // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
262d03330SJakub Kicinski /* Copyright (C) 2015-2019 Netronome Systems, Inc. */
34c352362SJakub Kicinski
44c352362SJakub Kicinski /*
54c352362SJakub Kicinski * nfp_net_common.c
64c352362SJakub Kicinski * Netronome network device driver: Common functions between PF and VF
74c352362SJakub Kicinski * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
84c352362SJakub Kicinski * Jason McMullan <jason.mcmullan@netronome.com>
94c352362SJakub Kicinski * Rolf Neugebauer <rolf.neugebauer@netronome.com>
104c352362SJakub Kicinski * Brad Petrus <brad.petrus@netronome.com>
114c352362SJakub Kicinski * Chris Telfer <chris.telfer@netronome.com>
124c352362SJakub Kicinski */
134c352362SJakub Kicinski
149ff304bfSJakub Kicinski #include <linux/bitfield.h>
15ecd63a02SJakub Kicinski #include <linux/bpf.h>
164c352362SJakub Kicinski #include <linux/module.h>
174c352362SJakub Kicinski #include <linux/kernel.h>
184c352362SJakub Kicinski #include <linux/init.h>
194c352362SJakub Kicinski #include <linux/fs.h>
204c352362SJakub Kicinski #include <linux/netdevice.h>
214c352362SJakub Kicinski #include <linux/etherdevice.h>
224c352362SJakub Kicinski #include <linux/interrupt.h>
234c352362SJakub Kicinski #include <linux/ip.h>
244c352362SJakub Kicinski #include <linux/ipv6.h>
2546627170SJakub Kicinski #include <linux/mm.h>
265ea14712SJakub Kicinski #include <linux/overflow.h>
27c0f031bcSJakub Kicinski #include <linux/page_ref.h>
284c352362SJakub Kicinski #include <linux/pci.h>
294c352362SJakub Kicinski #include <linux/pci_regs.h>
304c352362SJakub Kicinski #include <linux/ethtool.h>
314c352362SJakub Kicinski #include <linux/log2.h>
324c352362SJakub Kicinski #include <linux/if_vlan.h>
33be801411SYinjun Zhang #include <linux/if_bridge.h>
344c352362SJakub Kicinski #include <linux/random.h>
35a7b1ad08SJakub Kicinski #include <linux/vmalloc.h>
364c352362SJakub Kicinski #include <linux/ktime.h>
374c352362SJakub Kicinski
38c3991d39SDirk van der Merwe #include <net/tls.h>
394c352362SJakub Kicinski #include <net/vxlan.h>
409c91a365SNiklas Söderlund #include <net/xdp_sock_drv.h>
411cf78d4cSHuanhuan Wang #include <net/xfrm.h>
424c352362SJakub Kicinski
43e900db70SJakub Kicinski #include "nfpcore/nfp_dev.h"
44ce22f5a2SJakub Kicinski #include "nfpcore/nfp_nsp.h"
45e2c7114aSJakub Kicinski #include "ccm.h"
46bb45e51cSJakub Kicinski #include "nfp_app.h"
474c352362SJakub Kicinski #include "nfp_net_ctrl.h"
484c352362SJakub Kicinski #include "nfp_net.h"
4962d03330SJakub Kicinski #include "nfp_net_dp.h"
5025528d90SPablo Cascón #include "nfp_net_sriov.h"
516402528bSNiklas Söderlund #include "nfp_net_xsk.h"
52eb488c26SJakub Kicinski #include "nfp_port.h"
53232eeb1fSJakub Kicinski #include "crypto/crypto.h"
546a35ddc5SJakub Kicinski #include "crypto/fw.h"
554c352362SJakub Kicinski
56cc7eab25SYinjun Zhang static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr);
57cc7eab25SYinjun Zhang
584c352362SJakub Kicinski /**
594c352362SJakub Kicinski * nfp_net_get_fw_version() - Read and parse the FW version
604c352362SJakub Kicinski * @fw_ver: Output fw_version structure to read to
614c352362SJakub Kicinski * @ctrl_bar: Mapped address of the control BAR
624c352362SJakub Kicinski */
nfp_net_get_fw_version(struct nfp_net_fw_version * fw_ver,void __iomem * ctrl_bar)634c352362SJakub Kicinski void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
644c352362SJakub Kicinski void __iomem *ctrl_bar)
654c352362SJakub Kicinski {
664c352362SJakub Kicinski u32 reg;
674c352362SJakub Kicinski
684c352362SJakub Kicinski reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
694c352362SJakub Kicinski put_unaligned_le32(reg, fw_ver);
704c352362SJakub Kicinski }
714c352362SJakub Kicinski
nfp_qcp_queue_offset(const struct nfp_dev_info * dev_info,u16 queue)72e900db70SJakub Kicinski u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
73e900db70SJakub Kicinski {
74e900db70SJakub Kicinski queue &= dev_info->qc_idx_mask;
75e900db70SJakub Kicinski return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
76e900db70SJakub Kicinski }
77e900db70SJakub Kicinski
783d780b92SJakub Kicinski /* Firmware reconfig
793d780b92SJakub Kicinski *
803d780b92SJakub Kicinski * Firmware reconfig may take a while so we have two versions of it -
813d780b92SJakub Kicinski * synchronous and asynchronous (posted). All synchronous callers are holding
823d780b92SJakub Kicinski * RTNL so we don't have to worry about serializing them.
833d780b92SJakub Kicinski */
/* Kick off a FW reconfig: write the update word, flush, ping the queue */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* make sure the update word lands before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
	/* remembered for error reporting in the completion check */
	nn->reconfig_in_progress_update = update;
}
923d780b92SJakub Kicinski
933d780b92SJakub Kicinski /* Pass 0 as update to run posted reconfigs. */
nfp_net_reconfig_start_async(struct nfp_net * nn,u32 update)943d780b92SJakub Kicinski static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
953d780b92SJakub Kicinski {
963d780b92SJakub Kicinski update |= nn->reconfig_posted;
973d780b92SJakub Kicinski nn->reconfig_posted = 0;
983d780b92SJakub Kicinski
993d780b92SJakub Kicinski nfp_net_reconfig_start(nn, update);
1003d780b92SJakub Kicinski
1013d780b92SJakub Kicinski nn->reconfig_timer_active = true;
1023d780b92SJakub Kicinski mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
1033d780b92SJakub Kicinski }
1043d780b92SJakub Kicinski
/* Check whether FW acked the in-flight reconfig.  Returns true when the
 * request is finished (success, error, or @last_check timeout report).
 */
static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);

	/* FW zeroes the update word on completion */
	if (!reg)
		return true;

	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	if (last_check) {
		nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	return false;
}
1263d780b92SJakub Kicinski
__nfp_net_reconfig_wait(struct nfp_net * nn,unsigned long deadline)127e6471828SDirk van der Merwe static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
1283d780b92SJakub Kicinski {
1293d780b92SJakub Kicinski bool timed_out = false;
130e6471828SDirk van der Merwe int i;
1313d780b92SJakub Kicinski
132e6471828SDirk van der Merwe /* Poll update field, waiting for NFP to ack the config.
133e6471828SDirk van der Merwe * Do an opportunistic wait-busy loop, afterward sleep.
134e6471828SDirk van der Merwe */
135e6471828SDirk van der Merwe for (i = 0; i < 50; i++) {
136e6471828SDirk van der Merwe if (nfp_net_reconfig_check_done(nn, false))
137e6471828SDirk van der Merwe return false;
138e6471828SDirk van der Merwe udelay(4);
139e6471828SDirk van der Merwe }
140e6471828SDirk van der Merwe
1413d780b92SJakub Kicinski while (!nfp_net_reconfig_check_done(nn, timed_out)) {
142e6471828SDirk van der Merwe usleep_range(250, 500);
1433d780b92SJakub Kicinski timed_out = time_is_before_eq_jiffies(deadline);
1443d780b92SJakub Kicinski }
1453d780b92SJakub Kicinski
146e6471828SDirk van der Merwe return timed_out;
147e6471828SDirk van der Merwe }
148e6471828SDirk van der Merwe
nfp_net_reconfig_wait(struct nfp_net * nn,unsigned long deadline)149e6471828SDirk van der Merwe static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
150e6471828SDirk van der Merwe {
151e6471828SDirk van der Merwe if (__nfp_net_reconfig_wait(nn, deadline))
152e6471828SDirk van der Merwe return -EIO;
153e6471828SDirk van der Merwe
1543d780b92SJakub Kicinski if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
1553d780b92SJakub Kicinski return -EIO;
1563d780b92SJakub Kicinski
157e6471828SDirk van der Merwe return 0;
1583d780b92SJakub Kicinski }
1593d780b92SJakub Kicinski
nfp_net_reconfig_timer(struct timer_list * t)1603248f77fSKees Cook static void nfp_net_reconfig_timer(struct timer_list *t)
1613d780b92SJakub Kicinski {
1623248f77fSKees Cook struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
1633d780b92SJakub Kicinski
1643d780b92SJakub Kicinski spin_lock_bh(&nn->reconfig_lock);
1653d780b92SJakub Kicinski
1663d780b92SJakub Kicinski nn->reconfig_timer_active = false;
1673d780b92SJakub Kicinski
1683d780b92SJakub Kicinski /* If sync caller is present it will take over from us */
1693d780b92SJakub Kicinski if (nn->reconfig_sync_present)
1703d780b92SJakub Kicinski goto done;
1713d780b92SJakub Kicinski
1723d780b92SJakub Kicinski /* Read reconfig status and report errors */
1733d780b92SJakub Kicinski nfp_net_reconfig_check_done(nn, true);
1743d780b92SJakub Kicinski
1753d780b92SJakub Kicinski if (nn->reconfig_posted)
1763d780b92SJakub Kicinski nfp_net_reconfig_start_async(nn, 0);
1773d780b92SJakub Kicinski done:
1783d780b92SJakub Kicinski spin_unlock_bh(&nn->reconfig_lock);
1793d780b92SJakub Kicinski }
1803d780b92SJakub Kicinski
1813d780b92SJakub Kicinski /**
1823d780b92SJakub Kicinski * nfp_net_reconfig_post() - Post async reconfig request
1833d780b92SJakub Kicinski * @nn: NFP Net device to reconfigure
1843d780b92SJakub Kicinski * @update: The value for the update field in the BAR config
1853d780b92SJakub Kicinski *
1863d780b92SJakub Kicinski * Record FW reconfiguration request. Reconfiguration will be kicked off
1873d780b92SJakub Kicinski * whenever reconfiguration machinery is idle. Multiple requests can be
1883d780b92SJakub Kicinski * merged together!
1893d780b92SJakub Kicinski */
nfp_net_reconfig_post(struct nfp_net * nn,u32 update)1903d780b92SJakub Kicinski static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
1913d780b92SJakub Kicinski {
1923d780b92SJakub Kicinski spin_lock_bh(&nn->reconfig_lock);
1933d780b92SJakub Kicinski
1943d780b92SJakub Kicinski /* Sync caller will kick off async reconf when it's done, just post */
1953d780b92SJakub Kicinski if (nn->reconfig_sync_present) {
1963d780b92SJakub Kicinski nn->reconfig_posted |= update;
1973d780b92SJakub Kicinski goto done;
1983d780b92SJakub Kicinski }
1993d780b92SJakub Kicinski
2003d780b92SJakub Kicinski /* Opportunistically check if the previous command is done */
2013d780b92SJakub Kicinski if (!nn->reconfig_timer_active ||
2023d780b92SJakub Kicinski nfp_net_reconfig_check_done(nn, false))
2033d780b92SJakub Kicinski nfp_net_reconfig_start_async(nn, update);
2043d780b92SJakub Kicinski else
2053d780b92SJakub Kicinski nn->reconfig_posted |= update;
2063d780b92SJakub Kicinski done:
2073d780b92SJakub Kicinski spin_unlock_bh(&nn->reconfig_lock);
2083d780b92SJakub Kicinski }
2093d780b92SJakub Kicinski
nfp_net_reconfig_sync_enter(struct nfp_net * nn)2109ad716b9SJakub Kicinski static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
2119ad716b9SJakub Kicinski {
2129ad716b9SJakub Kicinski bool cancelled_timer = false;
2139ad716b9SJakub Kicinski u32 pre_posted_requests;
2149ad716b9SJakub Kicinski
2159ad716b9SJakub Kicinski spin_lock_bh(&nn->reconfig_lock);
2169ad716b9SJakub Kicinski
217e2c7114aSJakub Kicinski WARN_ON(nn->reconfig_sync_present);
2189ad716b9SJakub Kicinski nn->reconfig_sync_present = true;
2199ad716b9SJakub Kicinski
2209ad716b9SJakub Kicinski if (nn->reconfig_timer_active) {
2219ad716b9SJakub Kicinski nn->reconfig_timer_active = false;
2229ad716b9SJakub Kicinski cancelled_timer = true;
2239ad716b9SJakub Kicinski }
2249ad716b9SJakub Kicinski pre_posted_requests = nn->reconfig_posted;
2259ad716b9SJakub Kicinski nn->reconfig_posted = 0;
2269ad716b9SJakub Kicinski
2279ad716b9SJakub Kicinski spin_unlock_bh(&nn->reconfig_lock);
2289ad716b9SJakub Kicinski
2299ad716b9SJakub Kicinski if (cancelled_timer) {
2309ad716b9SJakub Kicinski del_timer_sync(&nn->reconfig_timer);
2319ad716b9SJakub Kicinski nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
2329ad716b9SJakub Kicinski }
2339ad716b9SJakub Kicinski
2349ad716b9SJakub Kicinski /* Run the posted reconfigs which were issued before we started */
2359ad716b9SJakub Kicinski if (pre_posted_requests) {
2369ad716b9SJakub Kicinski nfp_net_reconfig_start(nn, pre_posted_requests);
2379ad716b9SJakub Kicinski nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
2389ad716b9SJakub Kicinski }
2399ad716b9SJakub Kicinski }
2409ad716b9SJakub Kicinski
nfp_net_reconfig_wait_posted(struct nfp_net * nn)2419ad716b9SJakub Kicinski static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
2429ad716b9SJakub Kicinski {
2439ad716b9SJakub Kicinski nfp_net_reconfig_sync_enter(nn);
2449ad716b9SJakub Kicinski
2459ad716b9SJakub Kicinski spin_lock_bh(&nn->reconfig_lock);
2469ad716b9SJakub Kicinski nn->reconfig_sync_present = false;
2479ad716b9SJakub Kicinski spin_unlock_bh(&nn->reconfig_lock);
2489ad716b9SJakub Kicinski }
2499ad716b9SJakub Kicinski
2504c352362SJakub Kicinski /**
251dd5b2498SJakub Kicinski * __nfp_net_reconfig() - Reconfigure the firmware
2524c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
2534c352362SJakub Kicinski * @update: The value for the update field in the BAR config
2544c352362SJakub Kicinski *
2554c352362SJakub Kicinski * Write the update word to the BAR and ping the reconfig queue. The
2564c352362SJakub Kicinski * poll until the firmware has acknowledged the update by zeroing the
2574c352362SJakub Kicinski * update word.
2584c352362SJakub Kicinski *
2594c352362SJakub Kicinski * Return: Negative errno on error, 0 on success
2604c352362SJakub Kicinski */
__nfp_net_reconfig(struct nfp_net * nn,u32 update)261232eeb1fSJakub Kicinski int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
2624c352362SJakub Kicinski {
2633d780b92SJakub Kicinski int ret;
2644c352362SJakub Kicinski
2659ad716b9SJakub Kicinski nfp_net_reconfig_sync_enter(nn);
2663d780b92SJakub Kicinski
2673d780b92SJakub Kicinski nfp_net_reconfig_start(nn, update);
2683d780b92SJakub Kicinski ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
2693d780b92SJakub Kicinski
2703d780b92SJakub Kicinski spin_lock_bh(&nn->reconfig_lock);
2713d780b92SJakub Kicinski
2723d780b92SJakub Kicinski if (nn->reconfig_posted)
2733d780b92SJakub Kicinski nfp_net_reconfig_start_async(nn, 0);
2743d780b92SJakub Kicinski
2753d780b92SJakub Kicinski nn->reconfig_sync_present = false;
2763d780b92SJakub Kicinski
2773d780b92SJakub Kicinski spin_unlock_bh(&nn->reconfig_lock);
2783d780b92SJakub Kicinski
2794c352362SJakub Kicinski return ret;
2804c352362SJakub Kicinski }
2814c352362SJakub Kicinski
/* Locked wrapper around __nfp_net_reconfig() */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int err;

	nn_ctrl_bar_lock(nn);
	err = __nfp_net_reconfig(nn, update);
	nn_ctrl_bar_unlock(nn);

	return err;
}
292dd5b2498SJakub Kicinski
nfp_net_mbox_lock(struct nfp_net * nn,unsigned int data_size)293dd5b2498SJakub Kicinski int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
294dd5b2498SJakub Kicinski {
295dd5b2498SJakub Kicinski if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
296dd5b2498SJakub Kicinski nn_err(nn, "mailbox too small for %u of data (%u)\n",
297dd5b2498SJakub Kicinski data_size, nn->tlv_caps.mbox_len);
298dd5b2498SJakub Kicinski return -EIO;
299dd5b2498SJakub Kicinski }
300dd5b2498SJakub Kicinski
301dd5b2498SJakub Kicinski nn_ctrl_bar_lock(nn);
302dd5b2498SJakub Kicinski return 0;
303dd5b2498SJakub Kicinski }
304dd5b2498SJakub Kicinski
305b64052fcSPablo Cascón /**
306dd5b2498SJakub Kicinski * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
307b64052fcSPablo Cascón * @nn: NFP Net device to reconfigure
308b64052fcSPablo Cascón * @mbox_cmd: The value for the mailbox command
309b64052fcSPablo Cascón *
310b64052fcSPablo Cascón * Helper function for mailbox updates
311b64052fcSPablo Cascón *
312b64052fcSPablo Cascón * Return: Negative errno on error, 0 on success
313b64052fcSPablo Cascón */
nfp_net_mbox_reconfig(struct nfp_net * nn,u32 mbox_cmd)314dd5b2498SJakub Kicinski int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
315b64052fcSPablo Cascón {
316527d7d1bSJakub Kicinski u32 mbox = nn->tlv_caps.mbox_off;
317b64052fcSPablo Cascón int ret;
318b64052fcSPablo Cascón
319527d7d1bSJakub Kicinski nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
320b64052fcSPablo Cascón
321dd5b2498SJakub Kicinski ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
322b64052fcSPablo Cascón if (ret) {
323b64052fcSPablo Cascón nn_err(nn, "Mailbox update error\n");
324b64052fcSPablo Cascón return ret;
325b64052fcSPablo Cascón }
326b64052fcSPablo Cascón
327527d7d1bSJakub Kicinski return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
328b64052fcSPablo Cascón }
329b64052fcSPablo Cascón
/* Post a mailbox command without waiting for completion */
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
}
338e2c7114aSJakub Kicinski
nfp_net_mbox_reconfig_wait_posted(struct nfp_net * nn)339e2c7114aSJakub Kicinski int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
340e2c7114aSJakub Kicinski {
341e2c7114aSJakub Kicinski u32 mbox = nn->tlv_caps.mbox_off;
342e2c7114aSJakub Kicinski
343e2c7114aSJakub Kicinski nfp_net_reconfig_wait_posted(nn);
344e2c7114aSJakub Kicinski
345e2c7114aSJakub Kicinski return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
346e2c7114aSJakub Kicinski }
347e2c7114aSJakub Kicinski
/* Run a mailbox command and drop the ctrl BAR lock taken by
 * nfp_net_mbox_lock().
 */
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
	int err;

	err = nfp_net_mbox_reconfig(nn, mbox_cmd);
	nn_ctrl_bar_unlock(nn);
	return err;
}
356dd5b2498SJakub Kicinski
3574c352362SJakub Kicinski /* Interrupt configuration and handling
3584c352362SJakub Kicinski */
3594c352362SJakub Kicinski
3604c352362SJakub Kicinski /**
3614c352362SJakub Kicinski * nfp_net_irqs_alloc() - allocates MSI-X irqs
362fdace6c2SJakub Kicinski * @pdev: PCI device structure
363fdace6c2SJakub Kicinski * @irq_entries: Array to be initialized and used to hold the irq entries
364fdace6c2SJakub Kicinski * @min_irqs: Minimal acceptable number of interrupts
365fdace6c2SJakub Kicinski * @wanted_irqs: Target number of interrupts to allocate
3664c352362SJakub Kicinski *
3674c352362SJakub Kicinski * Return: Number of irqs obtained or 0 on error.
3684c352362SJakub Kicinski */
369fdace6c2SJakub Kicinski unsigned int
nfp_net_irqs_alloc(struct pci_dev * pdev,struct msix_entry * irq_entries,unsigned int min_irqs,unsigned int wanted_irqs)370fdace6c2SJakub Kicinski nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
371fdace6c2SJakub Kicinski unsigned int min_irqs, unsigned int wanted_irqs)
3724c352362SJakub Kicinski {
373fdace6c2SJakub Kicinski unsigned int i;
374fdace6c2SJakub Kicinski int got_irqs;
3754c352362SJakub Kicinski
376fdace6c2SJakub Kicinski for (i = 0; i < wanted_irqs; i++)
377fdace6c2SJakub Kicinski irq_entries[i].entry = i;
3784c352362SJakub Kicinski
379fdace6c2SJakub Kicinski got_irqs = pci_enable_msix_range(pdev, irq_entries,
380fdace6c2SJakub Kicinski min_irqs, wanted_irqs);
381fdace6c2SJakub Kicinski if (got_irqs < 0) {
382fdace6c2SJakub Kicinski dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
383fdace6c2SJakub Kicinski min_irqs, wanted_irqs, got_irqs);
3844c352362SJakub Kicinski return 0;
3854c352362SJakub Kicinski }
3864c352362SJakub Kicinski
387fdace6c2SJakub Kicinski if (got_irqs < wanted_irqs)
388fdace6c2SJakub Kicinski dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
389fdace6c2SJakub Kicinski wanted_irqs, got_irqs);
390fdace6c2SJakub Kicinski
391fdace6c2SJakub Kicinski return got_irqs;
392fdace6c2SJakub Kicinski }
393fdace6c2SJakub Kicinski
394fdace6c2SJakub Kicinski /**
395fdace6c2SJakub Kicinski * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
396fdace6c2SJakub Kicinski * @nn: NFP Network structure
397fdace6c2SJakub Kicinski * @irq_entries: Table of allocated interrupts
398fdace6c2SJakub Kicinski * @n: Size of @irq_entries (number of entries to grab)
399fdace6c2SJakub Kicinski *
400fdace6c2SJakub Kicinski * After interrupts are allocated with nfp_net_irqs_alloc() this function
401fdace6c2SJakub Kicinski * should be called to assign them to a specific netdev (port).
402fdace6c2SJakub Kicinski */
403fdace6c2SJakub Kicinski void
nfp_net_irqs_assign(struct nfp_net * nn,struct msix_entry * irq_entries,unsigned int n)404fdace6c2SJakub Kicinski nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
405fdace6c2SJakub Kicinski unsigned int n)
406fdace6c2SJakub Kicinski {
40779c12a75SJakub Kicinski struct nfp_net_dp *dp = &nn->dp;
40879c12a75SJakub Kicinski
409b33ae997SJakub Kicinski nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
41079c12a75SJakub Kicinski dp->num_r_vecs = nn->max_r_vecs;
4114c352362SJakub Kicinski
412fdace6c2SJakub Kicinski memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
4134c352362SJakub Kicinski
41479c12a75SJakub Kicinski if (dp->num_rx_rings > dp->num_r_vecs ||
41579c12a75SJakub Kicinski dp->num_tx_rings > dp->num_r_vecs)
41687232d96SJakub Kicinski dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
41779c12a75SJakub Kicinski dp->num_rx_rings, dp->num_tx_rings,
41879c12a75SJakub Kicinski dp->num_r_vecs);
419fdace6c2SJakub Kicinski
42079c12a75SJakub Kicinski dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
42179c12a75SJakub Kicinski dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
42279c12a75SJakub Kicinski dp->num_stack_tx_rings = dp->num_tx_rings;
4234c352362SJakub Kicinski }
4244c352362SJakub Kicinski
/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev: PCI device structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}
4354c352362SJakub Kicinski
4364c352362SJakub Kicinski /**
4374c352362SJakub Kicinski * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
4384c352362SJakub Kicinski * @irq: Interrupt
4394c352362SJakub Kicinski * @data: Opaque data structure
4404c352362SJakub Kicinski *
4414c352362SJakub Kicinski * Return: Indicate if the interrupt has been handled.
4424c352362SJakub Kicinski */
nfp_net_irq_rxtx(int irq,void * data)4434c352362SJakub Kicinski static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
4444c352362SJakub Kicinski {
4454c352362SJakub Kicinski struct nfp_net_r_vector *r_vec = data;
4464c352362SJakub Kicinski
4479d32e4e7SYinjun Zhang /* Currently we cannot tell if it's a rx or tx interrupt,
4489d32e4e7SYinjun Zhang * since dim does not need accurate event_ctr to calculate,
4499d32e4e7SYinjun Zhang * we just use this counter for both rx and tx dim.
4509d32e4e7SYinjun Zhang */
4519d32e4e7SYinjun Zhang r_vec->event_ctr++;
4529d32e4e7SYinjun Zhang
4534c352362SJakub Kicinski napi_schedule_irqoff(&r_vec->napi);
4544c352362SJakub Kicinski
4554c352362SJakub Kicinski /* The FW auto-masks any interrupt, either via the MASK bit in
4564c352362SJakub Kicinski * the MSI-X table or via the per entry ICR field. So there
4574c352362SJakub Kicinski * is no need to disable interrupts here.
4584c352362SJakub Kicinski */
4594c352362SJakub Kicinski return IRQ_HANDLED;
4604c352362SJakub Kicinski }
4614c352362SJakub Kicinski
nfp_ctrl_irq_rxtx(int irq,void * data)46277ece8d5SJakub Kicinski static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
46377ece8d5SJakub Kicinski {
46477ece8d5SJakub Kicinski struct nfp_net_r_vector *r_vec = data;
46577ece8d5SJakub Kicinski
46677ece8d5SJakub Kicinski tasklet_schedule(&r_vec->tasklet);
46777ece8d5SJakub Kicinski
46877ece8d5SJakub Kicinski return IRQ_HANDLED;
46977ece8d5SJakub Kicinski }
47077ece8d5SJakub Kicinski
4714c352362SJakub Kicinski /**
4724c352362SJakub Kicinski * nfp_net_read_link_status() - Reread link status from control BAR
4734c352362SJakub Kicinski * @nn: NFP Network structure
4744c352362SJakub Kicinski */
nfp_net_read_link_status(struct nfp_net * nn)4754c352362SJakub Kicinski static void nfp_net_read_link_status(struct nfp_net *nn)
4764c352362SJakub Kicinski {
4774c352362SJakub Kicinski unsigned long flags;
4784c352362SJakub Kicinski bool link_up;
47962fad9e6SYinjun Zhang u16 sts;
4804c352362SJakub Kicinski
4814c352362SJakub Kicinski spin_lock_irqsave(&nn->link_status_lock, flags);
4824c352362SJakub Kicinski
48362fad9e6SYinjun Zhang sts = nn_readw(nn, NFP_NET_CFG_STS);
4844c352362SJakub Kicinski link_up = !!(sts & NFP_NET_CFG_STS_LINK);
4854c352362SJakub Kicinski
4864c352362SJakub Kicinski if (nn->link_up == link_up)
4874c352362SJakub Kicinski goto out;
4884c352362SJakub Kicinski
4894c352362SJakub Kicinski nn->link_up = link_up;
49062fad9e6SYinjun Zhang if (nn->port) {
4916d4f8cbaSJakub Kicinski set_bit(NFP_PORT_CHANGED, &nn->port->flags);
49262fad9e6SYinjun Zhang if (nn->port->link_cb)
49362fad9e6SYinjun Zhang nn->port->link_cb(nn->port);
49462fad9e6SYinjun Zhang }
4954c352362SJakub Kicinski
4964c352362SJakub Kicinski if (nn->link_up) {
49779c12a75SJakub Kicinski netif_carrier_on(nn->dp.netdev);
49879c12a75SJakub Kicinski netdev_info(nn->dp.netdev, "NIC Link is Up\n");
4994c352362SJakub Kicinski } else {
50079c12a75SJakub Kicinski netif_carrier_off(nn->dp.netdev);
50179c12a75SJakub Kicinski netdev_info(nn->dp.netdev, "NIC Link is Down\n");
5024c352362SJakub Kicinski }
5034c352362SJakub Kicinski out:
5044c352362SJakub Kicinski spin_unlock_irqrestore(&nn->link_status_lock, flags);
5054c352362SJakub Kicinski }
5064c352362SJakub Kicinski
5074c352362SJakub Kicinski /**
5084c352362SJakub Kicinski * nfp_net_irq_lsc() - Interrupt service routine for link state changes
5094c352362SJakub Kicinski * @irq: Interrupt
5104c352362SJakub Kicinski * @data: Opaque data structure
5114c352362SJakub Kicinski *
5124c352362SJakub Kicinski * Return: Indicate if the interrupt has been handled.
5134c352362SJakub Kicinski */
nfp_net_irq_lsc(int irq,void * data)5144c352362SJakub Kicinski static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
5154c352362SJakub Kicinski {
5164c352362SJakub Kicinski struct nfp_net *nn = data;
517fdace6c2SJakub Kicinski struct msix_entry *entry;
518fdace6c2SJakub Kicinski
519fdace6c2SJakub Kicinski entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
5204c352362SJakub Kicinski
5214c352362SJakub Kicinski nfp_net_read_link_status(nn);
5224c352362SJakub Kicinski
523fdace6c2SJakub Kicinski nfp_net_irq_unmask(nn, entry->entry);
5244c352362SJakub Kicinski
5254c352362SJakub Kicinski return IRQ_HANDLED;
5264c352362SJakub Kicinski }
5274c352362SJakub Kicinski
5284c352362SJakub Kicinski /**
5294c352362SJakub Kicinski * nfp_net_irq_exn() - Interrupt service routine for exceptions
5304c352362SJakub Kicinski * @irq: Interrupt
5314c352362SJakub Kicinski * @data: Opaque data structure
5324c352362SJakub Kicinski *
5334c352362SJakub Kicinski * Return: Indicate if the interrupt has been handled.
5344c352362SJakub Kicinski */
nfp_net_irq_exn(int irq,void * data)5354c352362SJakub Kicinski static irqreturn_t nfp_net_irq_exn(int irq, void *data)
5364c352362SJakub Kicinski {
5374c352362SJakub Kicinski struct nfp_net *nn = data;
5384c352362SJakub Kicinski
5394c352362SJakub Kicinski nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
5404c352362SJakub Kicinski /* XXX TO BE IMPLEMENTED */
5414c352362SJakub Kicinski return IRQ_HANDLED;
5424c352362SJakub Kicinski }
5434c352362SJakub Kicinski
5444c352362SJakub Kicinski /**
5454c352362SJakub Kicinski * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
5464c352362SJakub Kicinski * @nn: NFP Network structure
5474c352362SJakub Kicinski * @ctrl_offset: Control BAR offset where IRQ configuration should be written
5484c352362SJakub Kicinski * @format: printf-style format to construct the interrupt name
5494c352362SJakub Kicinski * @name: Pointer to allocated space for interrupt name
5504c352362SJakub Kicinski * @name_sz: Size of space for interrupt name
5514c352362SJakub Kicinski * @vector_idx: Index of MSI-X vector used for this interrupt
5524c352362SJakub Kicinski * @handler: IRQ handler to register for this interrupt
5534c352362SJakub Kicinski */
5544c352362SJakub Kicinski static int
nfp_net_aux_irq_request(struct nfp_net * nn,u32 ctrl_offset,const char * format,char * name,size_t name_sz,unsigned int vector_idx,irq_handler_t handler)5554c352362SJakub Kicinski nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
5564c352362SJakub Kicinski const char *format, char *name, size_t name_sz,
5574c352362SJakub Kicinski unsigned int vector_idx, irq_handler_t handler)
5584c352362SJakub Kicinski {
5594c352362SJakub Kicinski struct msix_entry *entry;
5604c352362SJakub Kicinski int err;
5614c352362SJakub Kicinski
5624c352362SJakub Kicinski entry = &nn->irq_entries[vector_idx];
5634c352362SJakub Kicinski
56477ece8d5SJakub Kicinski snprintf(name, name_sz, format, nfp_net_name(nn));
5654c352362SJakub Kicinski err = request_irq(entry->vector, handler, 0, name, nn);
5664c352362SJakub Kicinski if (err) {
5674c352362SJakub Kicinski nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
5684c352362SJakub Kicinski entry->vector, err);
5694c352362SJakub Kicinski return err;
5704c352362SJakub Kicinski }
571fdace6c2SJakub Kicinski nn_writeb(nn, ctrl_offset, entry->entry);
572fc233650SJakub Kicinski nfp_net_irq_unmask(nn, entry->entry);
5734c352362SJakub Kicinski
5744c352362SJakub Kicinski return 0;
5754c352362SJakub Kicinski }
5764c352362SJakub Kicinski
5774c352362SJakub Kicinski /**
5784c352362SJakub Kicinski * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
5794c352362SJakub Kicinski * @nn: NFP Network structure
5804c352362SJakub Kicinski * @ctrl_offset: Control BAR offset where IRQ configuration should be written
5814c352362SJakub Kicinski * @vector_idx: Index of MSI-X vector used for this interrupt
5824c352362SJakub Kicinski */
nfp_net_aux_irq_free(struct nfp_net * nn,u32 ctrl_offset,unsigned int vector_idx)5834c352362SJakub Kicinski static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
5844c352362SJakub Kicinski unsigned int vector_idx)
5854c352362SJakub Kicinski {
/* 0xff presumably marks the vector as unused in the control BAR —
 * confirm against the BAR register spec
 */
5864c352362SJakub Kicinski nn_writeb(nn, ctrl_offset, 0xff);
/* Flush posted PCIe writes before the IRQ is released */
587fc233650SJakub Kicinski nn_pci_flush(nn);
5884c352362SJakub Kicinski free_irq(nn->irq_entries[vector_idx].vector, nn);
5894c352362SJakub Kicinski }
5904c352362SJakub Kicinski
/**
 * nfp_net_tls_tx() - Prepare an skb for kTLS transmit offload
 * @nn_dp/@dp:	NFP Net data path structure
 * @r_vec:	ring vector the skb will be transmitted on (stats updated)
 * @skb:	skb to transmit
 * @tls_handle:	output - firmware handle of the TLS connection for this skb
 * @nr_frags:	output - zeroed when a linear software-fallback skb is returned
 *
 * Return: the skb to transmit (the original, or a software-encrypted
 *	   linear fallback skb), or NULL on error (fallback skb freed).
 */
59162d03330SJakub Kicinski struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec,struct sk_buff * skb,u64 * tls_handle,int * nr_frags)59251a5e563SJakub Kicinski nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
59351a5e563SJakub Kicinski struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
594c3991d39SDirk van der Merwe {
595c8d3928eSJakub Kicinski #ifdef CONFIG_TLS_DEVICE
596c3991d39SDirk van der Merwe struct nfp_net_tls_offload_ctx *ntls;
597c3991d39SDirk van der Merwe struct sk_buff *nskb;
5989ed431c1SJakub Kicinski bool resync_pending;
599c3991d39SDirk van der Merwe u32 datalen, seq;
600c3991d39SDirk van der Merwe
601c3991d39SDirk van der Merwe if (likely(!dp->ktls_tx))
602c3991d39SDirk van der Merwe return skb;
603ed3c9a2fSJakub Kicinski if (!tls_is_skb_tx_device_offloaded(skb))
604c3991d39SDirk van der Merwe return skb;
605c3991d39SDirk van der Merwe
606504148feSEric Dumazet datalen = skb->len - skb_tcp_all_headers(skb);
607c3991d39SDirk van der Merwe seq = ntohl(tcp_hdr(skb)->seq);
608c3991d39SDirk van der Merwe ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
6099ed431c1SJakub Kicinski resync_pending = tls_offload_tx_resync_pending(skb->sk);
/* Out of sequence or resync pending: fall back to software crypto */
6109ed431c1SJakub Kicinski if (unlikely(resync_pending || ntls->next_seq != seq)) {
611c3991d39SDirk van der Merwe /* Pure ACK out of order already */
612c3991d39SDirk van der Merwe if (!datalen)
613c3991d39SDirk van der Merwe return skb;
614c3991d39SDirk van der Merwe
61551a5e563SJakub Kicinski u64_stats_update_begin(&r_vec->tx_sync);
61651a5e563SJakub Kicinski r_vec->tls_tx_fallback++;
61751a5e563SJakub Kicinski u64_stats_update_end(&r_vec->tx_sync);
61851a5e563SJakub Kicinski
619c3991d39SDirk van der Merwe nskb = tls_encrypt_skb(skb);
62051a5e563SJakub Kicinski if (!nskb) {
62151a5e563SJakub Kicinski u64_stats_update_begin(&r_vec->tx_sync);
62251a5e563SJakub Kicinski r_vec->tls_tx_no_fallback++;
62351a5e563SJakub Kicinski u64_stats_update_end(&r_vec->tx_sync);
624c3991d39SDirk van der Merwe return NULL;
62551a5e563SJakub Kicinski }
626c3991d39SDirk van der Merwe /* encryption wasn't necessary */
627c3991d39SDirk van der Merwe if (nskb == skb)
628c3991d39SDirk van der Merwe return skb;
629c3991d39SDirk van der Merwe /* we don't re-check ring space */
630c3991d39SDirk van der Merwe if (unlikely(skb_is_nonlinear(nskb))) {
631c3991d39SDirk van der Merwe nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
63251a5e563SJakub Kicinski u64_stats_update_begin(&r_vec->tx_sync);
63351a5e563SJakub Kicinski r_vec->tx_errors++;
63451a5e563SJakub Kicinski u64_stats_update_end(&r_vec->tx_sync);
635c3991d39SDirk van der Merwe dev_kfree_skb_any(nskb);
636c3991d39SDirk van der Merwe return NULL;
637c3991d39SDirk van der Merwe }
638c3991d39SDirk van der Merwe
639c3991d39SDirk van der Merwe /* jump forward, a TX may have gotten lost, need to sync TX */
6409ed431c1SJakub Kicinski if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
6418538d29cSJakub Kicinski tls_offload_tx_resync_request(nskb->sk, seq,
6428538d29cSJakub Kicinski ntls->next_seq);
643c3991d39SDirk van der Merwe
/* Fallback skb is linear, so the caller must not map frags */
644c3991d39SDirk van der Merwe *nr_frags = 0;
645c3991d39SDirk van der Merwe return nskb;
646c3991d39SDirk van der Merwe }
647c3991d39SDirk van der Merwe
64851a5e563SJakub Kicinski if (datalen) {
64951a5e563SJakub Kicinski u64_stats_update_begin(&r_vec->tx_sync);
650427545b3SJakub Kicinski if (!skb_is_gso(skb))
65151a5e563SJakub Kicinski r_vec->hw_tls_tx++;
652427545b3SJakub Kicinski else
653427545b3SJakub Kicinski r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
65451a5e563SJakub Kicinski u64_stats_update_end(&r_vec->tx_sync);
65551a5e563SJakub Kicinski }
65651a5e563SJakub Kicinski
657c3991d39SDirk van der Merwe memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
658c3991d39SDirk van der Merwe ntls->next_seq += datalen;
659c8d3928eSJakub Kicinski #endif
660c3991d39SDirk van der Merwe return skb;
661c3991d39SDirk van der Merwe }
662c3991d39SDirk van der Merwe
/**
 * nfp_net_tls_tx_undo() - Roll back TLS TX state after a failed transmit
 * @skb:	skb which was not transmitted
 * @tls_handle:	firmware TLS handle recorded for @skb, 0 if none
 *
 * Rewinds the driver's expected next TCP sequence number so a retransmit
 * of @skb is not mistaken for an out-of-order frame.
 */
nfp_net_tls_tx_undo(struct sk_buff * skb,u64 tls_handle)66362d03330SJakub Kicinski void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
6645a4cea28SJakub Kicinski {
6655a4cea28SJakub Kicinski #ifdef CONFIG_TLS_DEVICE
6665a4cea28SJakub Kicinski struct nfp_net_tls_offload_ctx *ntls;
6675a4cea28SJakub Kicinski u32 datalen, seq;
6685a4cea28SJakub Kicinski
6695a4cea28SJakub Kicinski if (!tls_handle)
6705a4cea28SJakub Kicinski return;
6715a4cea28SJakub Kicinski if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
6725a4cea28SJakub Kicinski return;
6735a4cea28SJakub Kicinski
674504148feSEric Dumazet datalen = skb->len - skb_tcp_all_headers(skb);
6755a4cea28SJakub Kicinski seq = ntohl(tcp_hdr(skb)->seq);
6765a4cea28SJakub Kicinski
6775a4cea28SJakub Kicinski ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
/* Only undo if this skb was the last one accounted for; anything else
 * indicates the accounting got out of step — warn once.
 */
6785a4cea28SJakub Kicinski if (ntls->next_seq == seq + datalen)
6795a4cea28SJakub Kicinski ntls->next_seq = seq;
6805a4cea28SJakub Kicinski else
6815a4cea28SJakub Kicinski WARN_ON_ONCE(1);
6825a4cea28SJakub Kicinski #endif
6835a4cea28SJakub Kicinski }
6845a4cea28SJakub Kicinski
/* netdev watchdog hook - log which TX ring stalled; recovery is left to
 * the normal datapath.
 */
static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	nn_warn(netdev_priv(netdev), "TX watchdog timeout on ring: %u\n", txqueue);
}
6914c352362SJakub Kicinski
69262d03330SJakub Kicinski /* Receive processing */
693bf187ea0SJakub Kicinski static unsigned int
nfp_net_calc_fl_bufsz_data(struct nfp_net_dp * dp)6949c91a365SNiklas Söderlund nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
695bf187ea0SJakub Kicinski {
6969c91a365SNiklas Söderlund unsigned int fl_bufsz = 0;
697bf187ea0SJakub Kicinski
69879c12a75SJakub Kicinski if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
699c0f031bcSJakub Kicinski fl_bufsz += NFP_NET_MAX_PREPEND;
700bf187ea0SJakub Kicinski else
70179c12a75SJakub Kicinski fl_bufsz += dp->rx_offset;
70276e1e1a8SJakub Kicinski fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
703bf187ea0SJakub Kicinski
7049c91a365SNiklas Söderlund return fl_bufsz;
7059c91a365SNiklas Söderlund }
7069c91a365SNiklas Söderlund
nfp_net_calc_fl_bufsz(struct nfp_net_dp * dp)7079c91a365SNiklas Söderlund static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
7089c91a365SNiklas Söderlund {
7099c91a365SNiklas Söderlund unsigned int fl_bufsz;
7109c91a365SNiklas Söderlund
7119c91a365SNiklas Söderlund fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
7129c91a365SNiklas Söderlund fl_bufsz += dp->rx_dma_off;
7139c91a365SNiklas Söderlund fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
7149c91a365SNiklas Söderlund
715c0f031bcSJakub Kicinski fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
716c0f031bcSJakub Kicinski fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
717c0f031bcSJakub Kicinski
718bf187ea0SJakub Kicinski return fl_bufsz;
719bf187ea0SJakub Kicinski }
7204c352362SJakub Kicinski
nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp * dp)7219c91a365SNiklas Söderlund static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
7229c91a365SNiklas Söderlund {
7239c91a365SNiklas Söderlund unsigned int fl_bufsz;
7249c91a365SNiklas Söderlund
7259c91a365SNiklas Söderlund fl_bufsz = XDP_PACKET_HEADROOM;
7269c91a365SNiklas Söderlund fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
7279c91a365SNiklas Söderlund
7289c91a365SNiklas Söderlund return fl_bufsz;
7299c91a365SNiklas Söderlund }
7309c91a365SNiklas Söderlund
7314c352362SJakub Kicinski /* Setup and Configuration
7324c352362SJakub Kicinski */
7334c352362SJakub Kicinski
7344c352362SJakub Kicinski /**
735cd083ce1SJakub Kicinski * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
736cd083ce1SJakub Kicinski * @nn: NFP Network structure
737cd083ce1SJakub Kicinski */
nfp_net_vecs_init(struct nfp_net * nn)738cd083ce1SJakub Kicinski static void nfp_net_vecs_init(struct nfp_net *nn)
739cd083ce1SJakub Kicinski {
74042ba9654SYinjun Zhang int numa_node = dev_to_node(&nn->pdev->dev);
741cd083ce1SJakub Kicinski struct nfp_net_r_vector *r_vec;
74242ba9654SYinjun Zhang unsigned int r;
743cd083ce1SJakub Kicinski
744cd083ce1SJakub Kicinski nn->lsc_handler = nfp_net_irq_lsc;
745cd083ce1SJakub Kicinski nn->exn_handler = nfp_net_irq_exn;
746cd083ce1SJakub Kicinski
/* Ring vectors follow the non-queue (LSC/EXN) vectors in the MSI-X table */
747cd083ce1SJakub Kicinski for (r = 0; r < nn->max_r_vecs; r++) {
748cd083ce1SJakub Kicinski struct msix_entry *entry;
749cd083ce1SJakub Kicinski
750cd083ce1SJakub Kicinski entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
751cd083ce1SJakub Kicinski
752cd083ce1SJakub Kicinski r_vec = &nn->r_vecs[r];
753cd083ce1SJakub Kicinski r_vec->nfp_net = nn;
754cd083ce1SJakub Kicinski r_vec->irq_entry = entry->entry;
755cd083ce1SJakub Kicinski r_vec->irq_vector = entry->vector;
756cd083ce1SJakub Kicinski
/* vNICs without a netdev are serviced from a tasklet rather than NAPI */
75777ece8d5SJakub Kicinski if (nn->dp.netdev) {
75877ece8d5SJakub Kicinski r_vec->handler = nfp_net_irq_rxtx;
75977ece8d5SJakub Kicinski } else {
76077ece8d5SJakub Kicinski r_vec->handler = nfp_ctrl_irq_rxtx;
76177ece8d5SJakub Kicinski
76277ece8d5SJakub Kicinski __skb_queue_head_init(&r_vec->queue);
76377ece8d5SJakub Kicinski spin_lock_init(&r_vec->lock);
7646fd86efaSJakub Kicinski tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
76577ece8d5SJakub Kicinski tasklet_disable(&r_vec->tasklet);
76677ece8d5SJakub Kicinski }
76777ece8d5SJakub Kicinski
/* Spread vector affinity across CPUs local to the device's NUMA node */
76842ba9654SYinjun Zhang cpumask_set_cpu(cpumask_local_spread(r, numa_node), &r_vec->affinity_mask);
769cd083ce1SJakub Kicinski }
770cd083ce1SJakub Kicinski }
771cd083ce1SJakub Kicinski
772e31230f9SJakub Kicinski static void
nfp_net_napi_add(struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec,int idx)7736402528bSNiklas Söderlund nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
77458eb4363SJakub Kicinski {
77558eb4363SJakub Kicinski if (dp->netdev)
77658eb4363SJakub Kicinski netif_napi_add(dp->netdev, &r_vec->napi,
777b48b89f9SJakub Kicinski nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
77858eb4363SJakub Kicinski else
77958eb4363SJakub Kicinski tasklet_enable(&r_vec->tasklet);
78058eb4363SJakub Kicinski }
78158eb4363SJakub Kicinski
78258eb4363SJakub Kicinski static void
nfp_net_napi_del(struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec)78358eb4363SJakub Kicinski nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
78458eb4363SJakub Kicinski {
78558eb4363SJakub Kicinski if (dp->netdev)
78658eb4363SJakub Kicinski netif_napi_del(&r_vec->napi);
78758eb4363SJakub Kicinski else
78858eb4363SJakub Kicinski tasklet_disable(&r_vec->tasklet);
78958eb4363SJakub Kicinski }
79058eb4363SJakub Kicinski
/* Attach RX/TX/XDP rings (and, when applicable, an AF_XDP pool) to a
 * ring vector. XDP TX rings are laid out after the stack TX rings in
 * dp->tx_rings.
 */
79158eb4363SJakub Kicinski static void
nfp_net_vector_assign_rings(struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec,int idx)79279c12a75SJakub Kicinski nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
79379c12a75SJakub Kicinski struct nfp_net_r_vector *r_vec, int idx)
794e31230f9SJakub Kicinski {
79579c12a75SJakub Kicinski r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
796ecd63a02SJakub Kicinski r_vec->tx_ring =
79779c12a75SJakub Kicinski idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
798ecd63a02SJakub Kicinski
79979c12a75SJakub Kicinski r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
80079c12a75SJakub Kicinski &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
8016402528bSNiklas Söderlund
/* Entered both when gaining and when losing an xsk pool (r_vec->xsk_pool
 * may still hold the old pool)
 */
8026402528bSNiklas Söderlund if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
8036402528bSNiklas Söderlund r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;
8046402528bSNiklas Söderlund
8056402528bSNiklas Söderlund if (r_vec->xsk_pool)
8066402528bSNiklas Söderlund xsk_pool_set_rxq_info(r_vec->xsk_pool,
8076402528bSNiklas Söderlund &r_vec->rx_ring->xdp_rxq);
8086402528bSNiklas Söderlund
/* Re-register NAPI so nfp_net_napi_add() picks the right poll callback
 * (xsk_poll vs poll) for the new pool state
 */
8096402528bSNiklas Söderlund nfp_net_napi_del(dp, r_vec);
8106402528bSNiklas Söderlund nfp_net_napi_add(dp, r_vec, idx);
8116402528bSNiklas Söderlund }
812e31230f9SJakub Kicinski }
813e31230f9SJakub Kicinski
/* Set up one ring vector: register its poll mechanism, request its IRQ
 * (left disabled via IRQF_NO_AUTOEN until the datapath is enabled) and
 * apply the CPU affinity hint. Returns 0 or negative errno.
 */
8140afbfb18SJakub Kicinski static int
nfp_net_prepare_vector(struct nfp_net * nn,struct nfp_net_r_vector * r_vec,int idx)8150afbfb18SJakub Kicinski nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
8160afbfb18SJakub Kicinski int idx)
8174c352362SJakub Kicinski {
8184c352362SJakub Kicinski int err;
8194c352362SJakub Kicinski
8206402528bSNiklas Söderlund nfp_net_napi_add(&nn->dp, r_vec, idx);
821164d1e9eSJakub Kicinski
8220afbfb18SJakub Kicinski snprintf(r_vec->name, sizeof(r_vec->name),
82377ece8d5SJakub Kicinski "%s-rxtx-%d", nfp_net_name(nn), idx);
/* IRQF_NO_AUTOEN: the IRQ stays disabled until explicitly enabled later */
824*f888741fSJinjie Ruan err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
825*f888741fSJinjie Ruan r_vec->name, r_vec);
8260afbfb18SJakub Kicinski if (err) {
82758eb4363SJakub Kicinski nfp_net_napi_del(&nn->dp, r_vec);
828fdace6c2SJakub Kicinski nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
8290afbfb18SJakub Kicinski return err;
8300afbfb18SJakub Kicinski }
8314c352362SJakub Kicinski
832fdace6c2SJakub Kicinski irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
8334c352362SJakub Kicinski
834fdace6c2SJakub Kicinski nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
835fdace6c2SJakub Kicinski r_vec->irq_entry);
8364c352362SJakub Kicinski
8374c352362SJakub Kicinski return 0;
8380afbfb18SJakub Kicinski }
8394c352362SJakub Kicinski
/* Undo nfp_net_prepare_vector() in reverse order: drop the affinity hint,
 * unregister NAPI/tasklet, then release the IRQ.
 */
8400afbfb18SJakub Kicinski static void
nfp_net_cleanup_vector(struct nfp_net * nn,struct nfp_net_r_vector * r_vec)8410afbfb18SJakub Kicinski nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
8420afbfb18SJakub Kicinski {
843fdace6c2SJakub Kicinski irq_set_affinity_hint(r_vec->irq_vector, NULL);
84458eb4363SJakub Kicinski nfp_net_napi_del(&nn->dp, r_vec);
845fdace6c2SJakub Kicinski free_irq(r_vec->irq_vector, r_vec);
8464c352362SJakub Kicinski }
8474c352362SJakub Kicinski
8484c352362SJakub Kicinski /**
8494c352362SJakub Kicinski * nfp_net_rss_write_itbl() - Write RSS indirection table to device
8504c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
8514c352362SJakub Kicinski */
nfp_net_rss_write_itbl(struct nfp_net * nn)8524c352362SJakub Kicinski void nfp_net_rss_write_itbl(struct nfp_net *nn)
8534c352362SJakub Kicinski {
8544c352362SJakub Kicinski int i;
8554c352362SJakub Kicinski
8564c352362SJakub Kicinski for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
8574c352362SJakub Kicinski nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
8584c352362SJakub Kicinski get_unaligned_le32(nn->rss_itbl + i));
8594c352362SJakub Kicinski }
8604c352362SJakub Kicinski
8614c352362SJakub Kicinski /**
8624c352362SJakub Kicinski * nfp_net_rss_write_key() - Write RSS hash key to device
8634c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
8644c352362SJakub Kicinski */
nfp_net_rss_write_key(struct nfp_net * nn)8654c352362SJakub Kicinski void nfp_net_rss_write_key(struct nfp_net *nn)
8664c352362SJakub Kicinski {
8674c352362SJakub Kicinski int i;
8684c352362SJakub Kicinski
8699ff304bfSJakub Kicinski for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
8704c352362SJakub Kicinski nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
8714c352362SJakub Kicinski get_unaligned_le32(nn->rss_key + i));
8724c352362SJakub Kicinski }
8734c352362SJakub Kicinski
8744c352362SJakub Kicinski /**
8754c352362SJakub Kicinski * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
8764c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
8774c352362SJakub Kicinski */
nfp_net_coalesce_write_cfg(struct nfp_net * nn)8784c352362SJakub Kicinski void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
8794c352362SJakub Kicinski {
8804c352362SJakub Kicinski u8 i;
8814c352362SJakub Kicinski u32 factor;
8824c352362SJakub Kicinski u32 value;
8834c352362SJakub Kicinski
8844c352362SJakub Kicinski /* Compute factor used to convert coalesce '_usecs' parameters to
8854c352362SJakub Kicinski * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
8864c352362SJakub Kicinski * count.
8874c352362SJakub Kicinski */
888ce991ab6SJakub Kicinski factor = nn->tlv_caps.me_freq_mhz / 16;
8894c352362SJakub Kicinski
8904c352362SJakub Kicinski /* copy RX interrupt coalesce parameters */
8914c352362SJakub Kicinski value = (nn->rx_coalesce_max_frames << 16) |
8924c352362SJakub Kicinski (factor * nn->rx_coalesce_usecs);
89379c12a75SJakub Kicinski for (i = 0; i < nn->dp.num_rx_rings; i++)
8944c352362SJakub Kicinski nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
8954c352362SJakub Kicinski
8964c352362SJakub Kicinski /* copy TX interrupt coalesce parameters */
8974c352362SJakub Kicinski value = (nn->tx_coalesce_max_frames << 16) |
8984c352362SJakub Kicinski (factor * nn->tx_coalesce_usecs);
89979c12a75SJakub Kicinski for (i = 0; i < nn->dp.num_tx_rings; i++)
9004c352362SJakub Kicinski nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
9014c352362SJakub Kicinski }
9024c352362SJakub Kicinski
9034c352362SJakub Kicinski /**
904f642963bSJakub Kicinski * nfp_net_write_mac_addr() - Write mac address to the device control BAR
9054c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
9069d372759SPablo Cascón * @addr: MAC address to write
9074c352362SJakub Kicinski *
908f642963bSJakub Kicinski * Writes the MAC address from the netdev to the device control BAR. Does not
909f642963bSJakub Kicinski * perform the required reconfig. We do a bit of byte swapping dance because
910f642963bSJakub Kicinski * firmware is LE.
9114c352362SJakub Kicinski */
nfp_net_write_mac_addr(struct nfp_net * nn,const u8 * addr)9129d372759SPablo Cascón static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
9134c352362SJakub Kicinski {
9149d372759SPablo Cascón nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
9159d372759SPablo Cascón nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
9164c352362SJakub Kicinski }
9174c352362SJakub Kicinski
9184c352362SJakub Kicinski /**
9194c352362SJakub Kicinski * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
9204c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
92107300f77SJakub Kicinski *
92207300f77SJakub Kicinski * Warning: must be fully idempotent.
9234c352362SJakub Kicinski */
nfp_net_clear_config_and_disable(struct nfp_net * nn)9244c352362SJakub Kicinski static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
9254c352362SJakub Kicinski {
926bec9ce34SZiyang Chen u32 new_ctrl, new_ctrl_w1, update;
927ca40feabSJakub Kicinski unsigned int r;
9284c352362SJakub Kicinski int err;
9294c352362SJakub Kicinski
93079c12a75SJakub Kicinski new_ctrl = nn->dp.ctrl;
9314c352362SJakub Kicinski new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
9324c352362SJakub Kicinski update = NFP_NET_CFG_UPDATE_GEN;
9334c352362SJakub Kicinski update |= NFP_NET_CFG_UPDATE_MSIX;
9344c352362SJakub Kicinski update |= NFP_NET_CFG_UPDATE_RING;
9354c352362SJakub Kicinski
9364c352362SJakub Kicinski if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
9374c352362SJakub Kicinski new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
9384c352362SJakub Kicinski
/* Without FREELIST_EN the ring enable masks are cleared together with
 * ENABLE; with it they are cleared in the second reconfig below.
 */
939bec9ce34SZiyang Chen if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) {
9404c352362SJakub Kicinski nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
9414c352362SJakub Kicinski nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
942bec9ce34SZiyang Chen }
9434c352362SJakub Kicinski
9444c352362SJakub Kicinski nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
9454c352362SJakub Kicinski err = nfp_net_reconfig(nn, update);
946aba52df8SJakub Kicinski if (err)
9474c352362SJakub Kicinski nn_err(nn, "Could not disable device: %d\n", err);
9484c352362SJakub Kicinski
/* Second stage for FREELIST_EN devices: clear the freelist enable bit
 * and the ring masks after ENABLE has been dropped.
 */
949bec9ce34SZiyang Chen if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
950bec9ce34SZiyang Chen new_ctrl_w1 = nn->dp.ctrl_w1;
951bec9ce34SZiyang Chen new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN;
952bec9ce34SZiyang Chen nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
953bec9ce34SZiyang Chen nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
954bec9ce34SZiyang Chen
955bec9ce34SZiyang Chen nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
956bec9ce34SZiyang Chen err = nfp_net_reconfig(nn, update);
957bec9ce34SZiyang Chen if (err)
958bec9ce34SZiyang Chen nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
959bec9ce34SZiyang Chen nn->dp.ctrl_w1 = new_ctrl_w1;
960bec9ce34SZiyang Chen }
961bec9ce34SZiyang Chen
/* Reset software ring state; must be idempotent (see function kdoc) */
9626402528bSNiklas Söderlund for (r = 0; r < nn->dp.num_rx_rings; r++) {
96379c12a75SJakub Kicinski nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
9646402528bSNiklas Söderlund if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
9656402528bSNiklas Söderlund nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
9666402528bSNiklas Söderlund }
96779c12a75SJakub Kicinski for (r = 0; r < nn->dp.num_tx_rings; r++)
96879c12a75SJakub Kicinski nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
96979c12a75SJakub Kicinski for (r = 0; r < nn->dp.num_r_vecs; r++)
970ca40feabSJakub Kicinski nfp_net_vec_clear_ring_data(nn, r);
971ca40feabSJakub Kicinski
97279c12a75SJakub Kicinski nn->dp.ctrl = new_ctrl;
9734c352362SJakub Kicinski }
9744c352362SJakub Kicinski
975ac0488efSJakub Kicinski /**
976ac0488efSJakub Kicinski * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
977ac0488efSJakub Kicinski * @nn: NFP Net device to reconfigure
978ac0488efSJakub Kicinski */
nfp_net_set_config_and_enable(struct nfp_net * nn)979ac0488efSJakub Kicinski static int nfp_net_set_config_and_enable(struct nfp_net *nn)
9801cd0cfc4SJakub Kicinski {
981bec9ce34SZiyang Chen u32 bufsz, new_ctrl, new_ctrl_w1, update = 0;
9821cd0cfc4SJakub Kicinski unsigned int r;
9831cd0cfc4SJakub Kicinski int err;
9841cd0cfc4SJakub Kicinski
98579c12a75SJakub Kicinski new_ctrl = nn->dp.ctrl;
986bec9ce34SZiyang Chen new_ctrl_w1 = nn->dp.ctrl_w1;
9871cd0cfc4SJakub Kicinski
988611bdd49SEdwin Peer if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
9891cd0cfc4SJakub Kicinski nfp_net_rss_write_key(nn);
9901cd0cfc4SJakub Kicinski nfp_net_rss_write_itbl(nn);
9911cd0cfc4SJakub Kicinski nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
9921cd0cfc4SJakub Kicinski update |= NFP_NET_CFG_UPDATE_RSS;
9931cd0cfc4SJakub Kicinski }
9941cd0cfc4SJakub Kicinski
995ad50451eSJakub Kicinski if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
9961cd0cfc4SJakub Kicinski nfp_net_coalesce_write_cfg(nn);
9971cd0cfc4SJakub Kicinski update |= NFP_NET_CFG_UPDATE_IRQMOD;
9981cd0cfc4SJakub Kicinski }
9991cd0cfc4SJakub Kicinski
100079c12a75SJakub Kicinski for (r = 0; r < nn->dp.num_tx_rings; r++)
100179c12a75SJakub Kicinski nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
100279c12a75SJakub Kicinski for (r = 0; r < nn->dp.num_rx_rings; r++)
100379c12a75SJakub Kicinski nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
10041cd0cfc4SJakub Kicinski
/* Enable masks: one bit per active ring. NOTE(review): assumes at least
 * one TX and one RX ring — a shift by 64 would be undefined; confirm
 * callers guarantee this.
 */
1005fc9769f6SJakub Kicinski nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
1006fc9769f6SJakub Kicinski U64_MAX >> (64 - nn->dp.num_tx_rings));
10071cd0cfc4SJakub Kicinski
1008fc9769f6SJakub Kicinski nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
1009fc9769f6SJakub Kicinski U64_MAX >> (64 - nn->dp.num_rx_rings));
10101cd0cfc4SJakub Kicinski
10115c0dbe9eSJakub Kicinski if (nn->dp.netdev)
10129d372759SPablo Cascón nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
10131cd0cfc4SJakub Kicinski
10145c0dbe9eSJakub Kicinski nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
1015ee200a73SJakub Kicinski
/* Report the usable packet-data size of a freelist buffer to the FW */
1016ee200a73SJakub Kicinski bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
1017ee200a73SJakub Kicinski nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
10181cd0cfc4SJakub Kicinski
1019bec9ce34SZiyang Chen /* Enable device
1020bec9ce34SZiyang Chen * Step 1: Replace the CTRL_ENABLE by NFP_NET_CFG_CTRL_FREELIST_EN if
1021bec9ce34SZiyang Chen * FREELIST_EN exits.
1022bec9ce34SZiyang Chen */
1023bec9ce34SZiyang Chen if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
1024bec9ce34SZiyang Chen new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN;
1025bec9ce34SZiyang Chen else
10261cd0cfc4SJakub Kicinski new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
10271cd0cfc4SJakub Kicinski update |= NFP_NET_CFG_UPDATE_GEN;
10281cd0cfc4SJakub Kicinski update |= NFP_NET_CFG_UPDATE_MSIX;
10291cd0cfc4SJakub Kicinski update |= NFP_NET_CFG_UPDATE_RING;
10301cd0cfc4SJakub Kicinski if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
10311cd0cfc4SJakub Kicinski new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
10321cd0cfc4SJakub Kicinski
1033bec9ce34SZiyang Chen /* Step 2: Send the configuration and write the freelist.
1034bec9ce34SZiyang Chen * - The freelist only need to be written once.
1035bec9ce34SZiyang Chen */
10361cd0cfc4SJakub Kicinski nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1037bec9ce34SZiyang Chen nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
10381cd0cfc4SJakub Kicinski err = nfp_net_reconfig(nn, update);
1039ac0488efSJakub Kicinski if (err) {
1040ac0488efSJakub Kicinski nfp_net_clear_config_and_disable(nn);
1041ac0488efSJakub Kicinski return err;
1042ac0488efSJakub Kicinski }
10431cd0cfc4SJakub Kicinski
104479c12a75SJakub Kicinski nn->dp.ctrl = new_ctrl;
1045bec9ce34SZiyang Chen nn->dp.ctrl_w1 = new_ctrl_w1;
10461cd0cfc4SJakub Kicinski
104779c12a75SJakub Kicinski for (r = 0; r < nn->dp.num_rx_rings; r++)
10486fe0c3b4SJakub Kicinski nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
1049aba52df8SJakub Kicinski
1050bec9ce34SZiyang Chen /* Step 3: Do the NFP_NET_CFG_CTRL_ENABLE. Send the configuration.
1051bec9ce34SZiyang Chen */
1052bec9ce34SZiyang Chen if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
1053bec9ce34SZiyang Chen new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
1054bec9ce34SZiyang Chen nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1055bec9ce34SZiyang Chen
1056bec9ce34SZiyang Chen err = nfp_net_reconfig(nn, update);
1057bec9ce34SZiyang Chen if (err) {
1058bec9ce34SZiyang Chen nfp_net_clear_config_and_disable(nn);
1059bec9ce34SZiyang Chen return err;
1060bec9ce34SZiyang Chen }
1061bec9ce34SZiyang Chen nn->dp.ctrl = new_ctrl;
1062bec9ce34SZiyang Chen }
1063bec9ce34SZiyang Chen
1064ac0488efSJakub Kicinski return 0;
10651cd0cfc4SJakub Kicinski }
10661cd0cfc4SJakub Kicinski
10674c352362SJakub Kicinski /**
1068d00ca2f3SJakub Kicinski * nfp_net_close_stack() - Quiesce the stack (part of close)
1069d00ca2f3SJakub Kicinski * @nn: NFP Net device to reconfigure
1070d00ca2f3SJakub Kicinski */
nfp_net_close_stack(struct nfp_net * nn)1071d00ca2f3SJakub Kicinski static void nfp_net_close_stack(struct nfp_net *nn)
1072d00ca2f3SJakub Kicinski {
10739d32e4e7SYinjun Zhang struct nfp_net_r_vector *r_vec;
1074d00ca2f3SJakub Kicinski unsigned int r;
1075d00ca2f3SJakub Kicinski
/* Mask the link state change IRQ first so link events stop arriving
 * while the device is being torn down
 */
1076d00ca2f3SJakub Kicinski disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
1077d00ca2f3SJakub Kicinski netif_carrier_off(nn->dp.netdev);
1078d00ca2f3SJakub Kicinski nn->link_up = false;
1079d00ca2f3SJakub Kicinski
1080d00ca2f3SJakub Kicinski for (r = 0; r < nn->dp.num_r_vecs; r++) {
10819d32e4e7SYinjun Zhang r_vec = &nn->r_vecs[r];
10829d32e4e7SYinjun Zhang
10839d32e4e7SYinjun Zhang disable_irq(r_vec->irq_vector);
10849d32e4e7SYinjun Zhang napi_disable(&r_vec->napi);
10859d32e4e7SYinjun Zhang
/* Stop any in-flight DIM (interrupt moderation) workers for this vector */
10869d32e4e7SYinjun Zhang if (r_vec->rx_ring)
10879d32e4e7SYinjun Zhang cancel_work_sync(&r_vec->rx_dim.work);
10889d32e4e7SYinjun Zhang
10899d32e4e7SYinjun Zhang if (r_vec->tx_ring)
10909d32e4e7SYinjun Zhang cancel_work_sync(&r_vec->tx_dim.work);
1091d00ca2f3SJakub Kicinski }
1092d00ca2f3SJakub Kicinski
1093d00ca2f3SJakub Kicinski netif_tx_disable(nn->dp.netdev);
1094d00ca2f3SJakub Kicinski }
1095d00ca2f3SJakub Kicinski
1096d00ca2f3SJakub Kicinski /**
1097d00ca2f3SJakub Kicinski * nfp_net_close_free_all() - Free all runtime resources
1098d00ca2f3SJakub Kicinski * @nn: NFP Net device to reconfigure
1099d00ca2f3SJakub Kicinski */
nfp_net_close_free_all(struct nfp_net * nn)1100d00ca2f3SJakub Kicinski static void nfp_net_close_free_all(struct nfp_net *nn)
1101d00ca2f3SJakub Kicinski {
1102d00ca2f3SJakub Kicinski unsigned int r;
1103d00ca2f3SJakub Kicinski
/* Free order: rings first, then per-ring vectors, then the auxiliary
 * (LSC/EXN) interrupts
 */
11044621199dSJakub Kicinski nfp_net_tx_rings_free(&nn->dp);
11054621199dSJakub Kicinski nfp_net_rx_rings_free(&nn->dp);
11064621199dSJakub Kicinski
1107d00ca2f3SJakub Kicinski for (r = 0; r < nn->dp.num_r_vecs; r++)
1108d00ca2f3SJakub Kicinski nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1109d00ca2f3SJakub Kicinski
1110d00ca2f3SJakub Kicinski nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1111d00ca2f3SJakub Kicinski nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1112d00ca2f3SJakub Kicinski }
1113d00ca2f3SJakub Kicinski
1114d00ca2f3SJakub Kicinski /**
1115d00ca2f3SJakub Kicinski * nfp_net_netdev_close() - Called when the device is downed
1116d00ca2f3SJakub Kicinski * @netdev: netdev structure
1117d00ca2f3SJakub Kicinski */
nfp_net_netdev_close(struct net_device * netdev)1118d00ca2f3SJakub Kicinski static int nfp_net_netdev_close(struct net_device *netdev)
1119d00ca2f3SJakub Kicinski {
1120d00ca2f3SJakub Kicinski struct nfp_net *nn = netdev_priv(netdev);
1121d00ca2f3SJakub Kicinski
1122d00ca2f3SJakub Kicinski /* Step 1: Disable RX and TX rings from the Linux kernel perspective
1123d00ca2f3SJakub Kicinski */
1124d00ca2f3SJakub Kicinski nfp_net_close_stack(nn);
1125d00ca2f3SJakub Kicinski
1126d00ca2f3SJakub Kicinski /* Step 2: Tell NFP
1127d00ca2f3SJakub Kicinski */
/* Drop multicast filters that were synced to the device */
1128cc7eab25SYinjun Zhang if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
1129cc7eab25SYinjun Zhang __dev_mc_unsync(netdev, nfp_net_mc_unsync);
1130cc7eab25SYinjun Zhang
1131d00ca2f3SJakub Kicinski nfp_net_clear_config_and_disable(nn);
1132447e9ebfSDirk van der Merwe nfp_port_configure(netdev, false);
1133d00ca2f3SJakub Kicinski
1134d00ca2f3SJakub Kicinski /* Step 3: Free resources
1135d00ca2f3SJakub Kicinski */
1136d00ca2f3SJakub Kicinski nfp_net_close_free_all(nn);
1137d00ca2f3SJakub Kicinski
1138d00ca2f3SJakub Kicinski nn_dbg(nn, "%s down", netdev->name);
1139d00ca2f3SJakub Kicinski return 0;
1140d00ca2f3SJakub Kicinski }
1141d00ca2f3SJakub Kicinski
/**
 * nfp_ctrl_close() - Shut down a control vNIC
 * @nn:	NFP Net device to close
 *
 * Quiesces the per-vector IRQs and tasklets, disables the device and
 * frees all runtime resources. Runs under rtnl_lock.
 */
nfp_ctrl_close(struct nfp_net * nn)114277ece8d5SJakub Kicinski void nfp_ctrl_close(struct nfp_net *nn)
114377ece8d5SJakub Kicinski {
114477ece8d5SJakub Kicinski int r;
114577ece8d5SJakub Kicinski
114677ece8d5SJakub Kicinski rtnl_lock();
114777ece8d5SJakub Kicinski
114877ece8d5SJakub Kicinski for (r = 0; r < nn->dp.num_r_vecs; r++) {
114977ece8d5SJakub Kicinski disable_irq(nn->r_vecs[r].irq_vector);
115077ece8d5SJakub Kicinski tasklet_disable(&nn->r_vecs[r].tasklet);
115177ece8d5SJakub Kicinski }
115277ece8d5SJakub Kicinski
115377ece8d5SJakub Kicinski nfp_net_clear_config_and_disable(nn);
115477ece8d5SJakub Kicinski
115577ece8d5SJakub Kicinski nfp_net_close_free_all(nn);
115677ece8d5SJakub Kicinski
115777ece8d5SJakub Kicinski rtnl_unlock();
115877ece8d5SJakub Kicinski }
115977ece8d5SJakub Kicinski
/* DIM (dynamic interrupt moderation) worker for RX: applies the moderation
 * profile chosen by the dim library to this ring's IRQ_MOD register.
 */
nfp_net_rx_dim_work(struct work_struct * work)11609d32e4e7SYinjun Zhang static void nfp_net_rx_dim_work(struct work_struct *work)
11619d32e4e7SYinjun Zhang {
11629d32e4e7SYinjun Zhang struct nfp_net_r_vector *r_vec;
11639d32e4e7SYinjun Zhang unsigned int factor, value;
11649d32e4e7SYinjun Zhang struct dim_cq_moder moder;
11659d32e4e7SYinjun Zhang struct nfp_net *nn;
11669d32e4e7SYinjun Zhang struct dim *dim;
11679d32e4e7SYinjun Zhang
11689d32e4e7SYinjun Zhang dim = container_of(work, struct dim, work);
11699d32e4e7SYinjun Zhang moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
11709d32e4e7SYinjun Zhang r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
11719d32e4e7SYinjun Zhang nn = r_vec->nfp_net;
11729d32e4e7SYinjun Zhang
11739d32e4e7SYinjun Zhang /* Compute factor used to convert coalesce '_usecs' parameters to
11749d32e4e7SYinjun Zhang * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
11759d32e4e7SYinjun Zhang * count.
11769d32e4e7SYinjun Zhang */
11779d32e4e7SYinjun Zhang factor = nn->tlv_caps.me_freq_mhz / 16;
/* nfp_net_coalesce_para_check() presumably rejects out-of-range values;
 * nonzero means skip this update — confirm semantics
 */
11789d32e4e7SYinjun Zhang if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
11799d32e4e7SYinjun Zhang return;
11809d32e4e7SYinjun Zhang
11819d32e4e7SYinjun Zhang /* copy RX interrupt coalesce parameters */
11829d32e4e7SYinjun Zhang value = (moder.pkts << 16) | (factor * moder.usec);
11839d32e4e7SYinjun Zhang nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
/* Best effort from a worker: reconfig errors are deliberately ignored */
11849d32e4e7SYinjun Zhang (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
11859d32e4e7SYinjun Zhang
11869d32e4e7SYinjun Zhang dim->state = DIM_START_MEASURE;
11879d32e4e7SYinjun Zhang }
11889d32e4e7SYinjun Zhang
nfp_net_tx_dim_work(struct work_struct * work)11899d32e4e7SYinjun Zhang static void nfp_net_tx_dim_work(struct work_struct *work)
11909d32e4e7SYinjun Zhang {
11919d32e4e7SYinjun Zhang struct nfp_net_r_vector *r_vec;
11929d32e4e7SYinjun Zhang unsigned int factor, value;
11939d32e4e7SYinjun Zhang struct dim_cq_moder moder;
11949d32e4e7SYinjun Zhang struct nfp_net *nn;
11959d32e4e7SYinjun Zhang struct dim *dim;
11969d32e4e7SYinjun Zhang
11979d32e4e7SYinjun Zhang dim = container_of(work, struct dim, work);
11989d32e4e7SYinjun Zhang moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
11999d32e4e7SYinjun Zhang r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
12009d32e4e7SYinjun Zhang nn = r_vec->nfp_net;
12019d32e4e7SYinjun Zhang
12029d32e4e7SYinjun Zhang /* Compute factor used to convert coalesce '_usecs' parameters to
12039d32e4e7SYinjun Zhang * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
12049d32e4e7SYinjun Zhang * count.
12059d32e4e7SYinjun Zhang */
12069d32e4e7SYinjun Zhang factor = nn->tlv_caps.me_freq_mhz / 16;
12079d32e4e7SYinjun Zhang if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
12089d32e4e7SYinjun Zhang return;
12099d32e4e7SYinjun Zhang
12109d32e4e7SYinjun Zhang /* copy TX interrupt coalesce parameters */
12119d32e4e7SYinjun Zhang value = (moder.pkts << 16) | (factor * moder.usec);
12129d32e4e7SYinjun Zhang nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
12139d32e4e7SYinjun Zhang (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
12149d32e4e7SYinjun Zhang
12159d32e4e7SYinjun Zhang dim->state = DIM_START_MEASURE;
12169d32e4e7SYinjun Zhang }
12179d32e4e7SYinjun Zhang
/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to reconfigure
 *
 * Arms dynamic interrupt moderation, enables NAPI and per-vector IRQs,
 * wakes the TX queues and re-enables link state change notifications.
 */
static void nfp_net_open_stack(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];

		/* (Re)initialize DIM work items before traffic can flow */
		if (r_vec->rx_ring) {
			INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
			r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}

		if (r_vec->tx_ring) {
			INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
			r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}

		/* NAPI is enabled before the IRQ so the handler may schedule it */
		napi_enable(&r_vec->napi);
		enable_irq(r_vec->irq_vector);
	}

	netif_tx_wake_all_queues(nn->dp.netdev);

	/* LSC IRQ was left disabled by nfp_net_open_alloc_all(); re-enable
	 * it and read the current link state explicitly.
	 */
	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	nfp_net_read_link_status(nn);
}
12491cd0cfc4SJakub Kicinski
/* Allocate everything needed to bring a vNIC up: EXN and LSC auxiliary
 * interrupts, per-ring interrupt vectors, and RX/TX ring resources.
 * Rings are then assigned to their vectors.  On failure, everything
 * acquired so far is unwound in reverse order via the goto chain.
 */
static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
	int err, r;

	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	/* Keep link state change IRQ off until nfp_net_open_stack() */
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_cleanup_vec_p;
	}

	err = nfp_net_rx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_cleanup_vec;

	err = nfp_net_tx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_free_rx_rings;

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	return 0;

err_free_rx_rings:
	nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
	/* Fall into the partial-vector cleanup with all vectors prepared */
	r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
	while (r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}
1297ee26756dSJakub Kicinski
/* ndo_open handler.  Brings the interface up in three stages (allocate,
 * configure firmware, enable stack); any failure unwinds everything
 * acquired so far and returns a negative errno.
 */
static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_open_alloc_all(nn);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
	if (err)
		goto err_free_all;

	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
	if (err)
		goto err_free_all;

	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_port_configure(netdev, true);
	if (err)
		goto err_free_all;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_port_disable;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
err_free_all:
	nfp_net_close_free_all(nn);
	return err;
}
13524c352362SJakub Kicinski
/* Bring up a control vNIC: allocate rings and IRQs, enable the FW, then
 * enable the per-vector interrupts directly — none of the netdev stack
 * steps of nfp_net_netdev_open() are performed here.
 */
int nfp_ctrl_open(struct nfp_net *nn)
{
	int err, r;

	/* ring dumping depends on vNICs being opened/closed under rtnl */
	rtnl_lock();

	err = nfp_net_open_alloc_all(nn);
	if (err)
		goto err_unlock;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_free_all;

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		enable_irq(nn->r_vecs[r].irq_vector);

	rtnl_unlock();

	return 0;

err_free_all:
	nfp_net_close_free_all(nn);
err_unlock:
	rtnl_unlock();
	return err;
}
138177ece8d5SJakub Kicinski
/* Queue an asynchronous mailbox message for deferred delivery by the
 * mbox_amsg work item.  @data (@len bytes) is copied into the entry, so
 * the caller's buffer need not outlive this call; @cb performs the
 * actual mailbox transaction when the work runs.  GFP_ATOMIC allows use
 * from atomic context.  Returns 0 or -ENOMEM.
 */
int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
{
	struct nfp_mbox_amsg_entry *msg;

	msg = kmalloc(sizeof(*msg) + len, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->cmd = cmd;
	msg->cfg = cb;
	memcpy(msg->msg, data, len);

	spin_lock_bh(&nn->mbox_amsg.lock);
	list_add_tail(&msg->list, &nn->mbox_amsg.list);
	spin_unlock_bh(&nn->mbox_amsg.lock);

	schedule_work(&nn->mbox_amsg.work);

	return 0;
}
140371f814cdSYinjun Zhang
nfp_net_mbox_amsg_work(struct work_struct * work)140471f814cdSYinjun Zhang static void nfp_net_mbox_amsg_work(struct work_struct *work)
140571f814cdSYinjun Zhang {
140671f814cdSYinjun Zhang struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
140771f814cdSYinjun Zhang struct nfp_mbox_amsg_entry *entry, *tmp;
140871f814cdSYinjun Zhang struct list_head tmp_list;
140971f814cdSYinjun Zhang
141071f814cdSYinjun Zhang INIT_LIST_HEAD(&tmp_list);
141171f814cdSYinjun Zhang
141271f814cdSYinjun Zhang spin_lock_bh(&nn->mbox_amsg.lock);
141371f814cdSYinjun Zhang list_splice_init(&nn->mbox_amsg.list, &tmp_list);
141471f814cdSYinjun Zhang spin_unlock_bh(&nn->mbox_amsg.lock);
141571f814cdSYinjun Zhang
141671f814cdSYinjun Zhang list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
141771f814cdSYinjun Zhang int err = entry->cfg(nn, entry);
141871f814cdSYinjun Zhang
141971f814cdSYinjun Zhang if (err)
142071f814cdSYinjun Zhang nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
142171f814cdSYinjun Zhang
142271f814cdSYinjun Zhang list_del(&entry->list);
142371f814cdSYinjun Zhang kfree(entry);
142471f814cdSYinjun Zhang }
142571f814cdSYinjun Zhang }
142671f814cdSYinjun Zhang
/* Mailbox callback programming one multicast MAC address (6 bytes held
 * in entry->msg) and issuing the add/delete command from entry->cmd.
 */
static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
	unsigned char *addr = entry->msg;
	int ret;

	ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
	if (ret)
		return ret;

	/* Upper 4 bytes then lower 2 bytes of the MAC, big-endian order */
	nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
		  get_unaligned_be32(addr));
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
		  get_unaligned_be16(addr + 4));

	return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
}
1443e20aa071SYinjun Zhang
nfp_net_mc_sync(struct net_device * netdev,const unsigned char * addr)1444de624864SDiana Wang static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
1445de624864SDiana Wang {
1446de624864SDiana Wang struct nfp_net *nn = netdev_priv(netdev);
1447de624864SDiana Wang
1448de624864SDiana Wang if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
1449de624864SDiana Wang nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
1450de624864SDiana Wang netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
1451de624864SDiana Wang return -EINVAL;
1452de624864SDiana Wang }
1453de624864SDiana Wang
145471f814cdSYinjun Zhang return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
145571f814cdSYinjun Zhang NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
1456de624864SDiana Wang }
1457de624864SDiana Wang
nfp_net_mc_unsync(struct net_device * netdev,const unsigned char * addr)1458de624864SDiana Wang static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
1459de624864SDiana Wang {
1460e20aa071SYinjun Zhang struct nfp_net *nn = netdev_priv(netdev);
1461e20aa071SYinjun Zhang
146271f814cdSYinjun Zhang return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
146371f814cdSYinjun Zhang NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
1464de624864SDiana Wang }
1465de624864SDiana Wang
/* ndo_set_rx_mode: reconcile promiscuous/allmulti flags and the
 * multicast address list with the device control registers.
 */
static void nfp_net_set_rx_mode(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl, new_ctrl_w1;

	new_ctrl = nn->dp.ctrl;
	new_ctrl_w1 = nn->dp.ctrl_w1;

	/* L2 multicast RX is wanted if any MC address is set or allmulti;
	 * masking with nn->cap only sets the bit when the FW supports it.
	 */
	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
	else
		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;

	/* allmulti turns the FW multicast filter off entirely */
	if (netdev->flags & IFF_ALLMULTI)
		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_MCAST_FILTER;
	else
		new_ctrl_w1 |= nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER;

	if (netdev->flags & IFF_PROMISC) {
		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
		else
			nn_warn(nn, "FW does not support promiscuous mode\n");
	} else {
		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
	}

	/* Sync the MC address list to the FW when filtering is supported */
	if ((nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) &&
	    __dev_mc_sync(netdev, nfp_net_mc_sync, nfp_net_mc_unsync))
		netdev_err(netdev, "Sync mc address failed\n");

	if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1)
		return;

	/* Only touch the registers whose value actually changed */
	if (new_ctrl != nn->dp.ctrl)
		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	if (new_ctrl_w1 != nn->dp.ctrl_w1)
		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

	nn->dp.ctrl = new_ctrl;
	nn->dp.ctrl_w1 = new_ctrl_w1;
}
15094c352362SJakub Kicinski
nfp_net_rss_init_itbl(struct nfp_net * nn)15101e9e10d0SJakub Kicinski static void nfp_net_rss_init_itbl(struct nfp_net *nn)
15111e9e10d0SJakub Kicinski {
15121e9e10d0SJakub Kicinski int i;
15131e9e10d0SJakub Kicinski
15141e9e10d0SJakub Kicinski for (i = 0; i < sizeof(nn->rss_itbl); i++)
15151e9e10d0SJakub Kicinski nn->rss_itbl[i] =
151679c12a75SJakub Kicinski ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
15171e9e10d0SJakub Kicinski }
15181e9e10d0SJakub Kicinski
nfp_net_dp_swap(struct nfp_net * nn,struct nfp_net_dp * dp)1519512e94dcSJakub Kicinski static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
1520512e94dcSJakub Kicinski {
1521512e94dcSJakub Kicinski struct nfp_net_dp new_dp = *dp;
1522512e94dcSJakub Kicinski
1523512e94dcSJakub Kicinski *dp = nn->dp;
1524512e94dcSJakub Kicinski nn->dp = new_dp;
152576e1e1a8SJakub Kicinski
152676e1e1a8SJakub Kicinski nn->dp.netdev->mtu = new_dp.mtu;
1527892a7f70SJakub Kicinski
1528892a7f70SJakub Kicinski if (!netif_is_rxfh_configured(nn->dp.netdev))
1529892a7f70SJakub Kicinski nfp_net_rss_init_itbl(nn);
1530512e94dcSJakub Kicinski }
1531512e94dcSJakub Kicinski
nfp_net_dp_swap_enable(struct nfp_net * nn,struct nfp_net_dp * dp)1532892a7f70SJakub Kicinski static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
15334c352362SJakub Kicinski {
1534e31230f9SJakub Kicinski unsigned int r;
1535164d1e9eSJakub Kicinski int err;
1536e31230f9SJakub Kicinski
1537892a7f70SJakub Kicinski nfp_net_dp_swap(nn, dp);
1538164d1e9eSJakub Kicinski
1539e31230f9SJakub Kicinski for (r = 0; r < nn->max_r_vecs; r++)
154079c12a75SJakub Kicinski nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
1541e31230f9SJakub Kicinski
1542e874f455SJakub Kicinski err = netif_set_real_num_queues(nn->dp.netdev,
1543e874f455SJakub Kicinski nn->dp.num_stack_tx_rings,
1544e874f455SJakub Kicinski nn->dp.num_rx_rings);
1545164d1e9eSJakub Kicinski if (err)
1546164d1e9eSJakub Kicinski return err;
1547164d1e9eSJakub Kicinski
1548ac0488efSJakub Kicinski return nfp_net_set_config_and_enable(nn);
15494c352362SJakub Kicinski }
15504c352362SJakub Kicinski
nfp_net_clone_dp(struct nfp_net * nn)1551783496b0SJakub Kicinski struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
1552783496b0SJakub Kicinski {
1553783496b0SJakub Kicinski struct nfp_net_dp *new;
1554783496b0SJakub Kicinski
1555783496b0SJakub Kicinski new = kmalloc(sizeof(*new), GFP_KERNEL);
1556783496b0SJakub Kicinski if (!new)
1557783496b0SJakub Kicinski return NULL;
1558783496b0SJakub Kicinski
1559783496b0SJakub Kicinski *new = nn->dp;
1560783496b0SJakub Kicinski
1561543bd14fSNiklas Söderlund new->xsk_pools = kmemdup(new->xsk_pools,
1562543bd14fSNiklas Söderlund array_size(nn->max_r_vecs,
1563543bd14fSNiklas Söderlund sizeof(new->xsk_pools)),
1564543bd14fSNiklas Söderlund GFP_KERNEL);
1565543bd14fSNiklas Söderlund if (!new->xsk_pools) {
1566543bd14fSNiklas Söderlund kfree(new);
1567543bd14fSNiklas Söderlund return NULL;
1568543bd14fSNiklas Söderlund }
1569543bd14fSNiklas Söderlund
1570783496b0SJakub Kicinski /* Clear things which need to be recomputed */
1571783496b0SJakub Kicinski new->fl_bufsz = 0;
1572783496b0SJakub Kicinski new->tx_rings = NULL;
1573783496b0SJakub Kicinski new->rx_rings = NULL;
1574783496b0SJakub Kicinski new->num_r_vecs = 0;
1575783496b0SJakub Kicinski new->num_stack_tx_rings = 0;
15760dcf7f50SJakub Kicinski new->txrwb = NULL;
15770dcf7f50SJakub Kicinski new->txrwb_dma = 0;
1578783496b0SJakub Kicinski
1579783496b0SJakub Kicinski return new;
1580783496b0SJakub Kicinski }
1581783496b0SJakub Kicinski
/* Release a datapath structure obtained from nfp_net_clone_dp(),
 * including its private copy of the XSK pool pointer array.
 */
static void nfp_net_free_dp(struct nfp_net_dp *dp)
{
	kfree(dp->xsk_pools);
	kfree(dp);
}
1587543bd14fSNiklas Söderlund
1588d957c0f7SJakub Kicinski static int
nfp_net_check_config(struct nfp_net * nn,struct nfp_net_dp * dp,struct netlink_ext_ack * extack)1589d957c0f7SJakub Kicinski nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
1590d957c0f7SJakub Kicinski struct netlink_ext_ack *extack)
1591ecd63a02SJakub Kicinski {
15929c91a365SNiklas Söderlund unsigned int r, xsk_min_fl_bufsz;
15939c91a365SNiklas Söderlund
1594ecd63a02SJakub Kicinski /* XDP-enabled tests */
15959dc6b116SJakub Kicinski if (!dp->xdp_prog)
1596ecd63a02SJakub Kicinski return 0;
15972195c263SJakub Kicinski if (dp->fl_bufsz > PAGE_SIZE) {
15984d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
1599ecd63a02SJakub Kicinski return -EINVAL;
1600ecd63a02SJakub Kicinski }
1601892a7f70SJakub Kicinski if (dp->num_tx_rings > nn->max_tx_rings) {
16024d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
1603ecd63a02SJakub Kicinski return -EINVAL;
1604ecd63a02SJakub Kicinski }
1605ecd63a02SJakub Kicinski
16069c91a365SNiklas Söderlund xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
16079c91a365SNiklas Söderlund for (r = 0; r < nn->max_r_vecs; r++) {
16089c91a365SNiklas Söderlund if (!dp->xsk_pools[r])
16099c91a365SNiklas Söderlund continue;
16109c91a365SNiklas Söderlund
16119c91a365SNiklas Söderlund if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
16129c91a365SNiklas Söderlund NL_SET_ERR_MSG_MOD(extack,
16130c1794c2SGuo Zhengkui "XSK buffer pool chunk size too small");
16149c91a365SNiklas Söderlund return -EINVAL;
16159c91a365SNiklas Söderlund }
16169c91a365SNiklas Söderlund }
16179c91a365SNiklas Söderlund
1618ecd63a02SJakub Kicinski return 0;
1619ecd63a02SJakub Kicinski }
1620ecd63a02SJakub Kicinski
/* Apply a new datapath configuration @dp (from nfp_net_clone_dp()) to
 * the device.  Consumes @dp in all cases.  If the device is running,
 * the new rings and vectors are allocated before the device is stopped
 * so the old configuration can be restored if the FW rejects the new one.
 */
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
			  struct netlink_ext_ack *extack)
{
	int r, err;

	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);

	dp->num_stack_tx_rings = dp->num_tx_rings;
	/* With XDP, part of the TX rings serve XDP_TX, not the stack */
	if (dp->xdp_prog)
		dp->num_stack_tx_rings -= dp->num_rx_rings;

	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);

	err = nfp_net_check_config(nn, dp, extack);
	if (err)
		goto exit_free_dp;

	/* Device down: just record the new settings, nothing to restart */
	if (!netif_running(dp->netdev)) {
		nfp_net_dp_swap(nn, dp);
		err = 0;
		goto exit_free_dp;
	}

	/* Prepare new rings */
	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err) {
			/* Record how many vectors exist for the unwind loop */
			dp->num_r_vecs = r;
			goto err_cleanup_vecs;
		}
	}

	err = nfp_net_rx_rings_prepare(nn, dp);
	if (err)
		goto err_cleanup_vecs;

	err = nfp_net_tx_rings_prepare(nn, dp);
	if (err)
		goto err_free_rx;

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	err = nfp_net_dp_swap_enable(nn, dp);
	if (err) {
		int err2;

		nfp_net_clear_config_and_disable(nn);

		/* Try with old configuration and old rings; the failed swap
		 * above left the previous state in @dp, so swapping again
		 * restores it.
		 */
		err2 = nfp_net_dp_swap_enable(nn, dp);
		if (err2)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err2);
	}
	/* Release the vectors the now-inactive configuration no longer uses */
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_rx_rings_free(dp);
	nfp_net_tx_rings_free(dp);

	nfp_net_open_stack(nn);
exit_free_dp:
	nfp_net_free_dp(dp);

	return err;

err_free_rx:
	nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_free_dp(dp);
	return err;
}
169768453c7aSJakub Kicinski
/* ndo_change_mtu: validate @new_mtu with the app layer, then apply it
 * through a full datapath/ring reconfiguration.
 */
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_dp *dp;
	int err;

	err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
	if (err)
		return err;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->mtu = new_mtu;

	/* nfp_net_ring_reconfig() consumes dp */
	return nfp_net_ring_reconfig(nn, dp, NULL);
}
1716cc7c0333SJakub Kicinski
/* ndo_vlan_rx_add_vid: program a VLAN filter entry for @vid through the
 * config mailbox.  Note @proto is not forwarded — ETH_P_8021Q is written
 * unconditionally (only C-tag filtering is configured here).
 */
static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
1740b64052fcSPablo Cascón
/* ndo_vlan_rx_kill_vid: remove the VLAN filter entry for @vid through
 * the config mailbox.  As in the add path, @proto is not forwarded and
 * ETH_P_8021Q is written unconditionally.
 */
static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
1764b64052fcSPablo Cascón
/* ndo_get_stats64: aggregate per-ring software counters with device
 * counters read from the control BAR.
 */
static void nfp_net_stat64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	/* Collect software stats */
	for (r = 0; r < nn->max_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		/* u64_stats retry loops give a consistent snapshot of the
		 * 64-bit counters even where updates are not atomic.
		 */
		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}

	/* Add in device stats */
	stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
	stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
	stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);

	stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
	stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
}
18064c352362SJakub Kicinski
/* nfp_net_set_features() - ndo_set_features handler
 *
 * Translate changed netdev feature bits into vNIC control word bits and,
 * if the resulting control word differs from the currently programmed
 * one, write it to the BAR and trigger a general reconfig.
 */
static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	ctrl = nn->dp.ctrl;

	if (changed & NETIF_F_RXCSUM)
		ctrl = (features & NETIF_F_RXCSUM) ?
			ctrl | (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) :
			ctrl & ~NFP_NET_CFG_CTRL_RXCSUM_ANY;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ctrl = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ?
			ctrl | NFP_NET_CFG_CTRL_TXCSUM :
			ctrl & ~NFP_NET_CFG_CTRL_TXCSUM;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			/* Use the LSO2 capability bit when the device
			 * advertises it, plain LSO otherwise.
			 */
			ctrl |= (nn->cap & NFP_NET_CFG_CTRL_LSO2) ?
				(nn->cap & NFP_NET_CFG_CTRL_LSO2) :
				NFP_NET_CFG_CTRL_LSO;
		else
			ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			/* Prefer the v2 VLAN strip capability when present */
			ctrl |= (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2) ?
				(nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2) :
				NFP_NET_CFG_CTRL_RXVLAN;
		else
			ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			/* Prefer the v2 VLAN insert capability when present */
			ctrl |= (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) ?
				(nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) :
				NFP_NET_CFG_CTRL_TXVLAN;
		else
			ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
		ctrl = (features & NETIF_F_HW_VLAN_CTAG_FILTER) ?
			ctrl | NFP_NET_CFG_CTRL_CTAG_FILTER :
			ctrl & ~NFP_NET_CFG_CTRL_CTAG_FILTER;

	if (changed & NETIF_F_HW_VLAN_STAG_RX)
		ctrl = (features & NETIF_F_HW_VLAN_STAG_RX) ?
			ctrl | NFP_NET_CFG_CTRL_RXQINQ :
			ctrl & ~NFP_NET_CFG_CTRL_RXQINQ;

	if (changed & NETIF_F_SG)
		ctrl = (features & NETIF_F_SG) ?
			ctrl | NFP_NET_CFG_CTRL_GATHER :
			ctrl & ~NFP_NET_CFG_CTRL_GATHER;

	err = nfp_port_set_features(netdev, features);
	if (err)
		return err;

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (ctrl == nn->dp.ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->dp.ctrl = ctrl;

	return 0;
}
18984c352362SJakub Kicinski
18994c352362SJakub Kicinski static netdev_features_t
nfp_net_fix_features(struct net_device * netdev,netdev_features_t features)190067d2656bSDiana Wang nfp_net_fix_features(struct net_device *netdev,
190167d2656bSDiana Wang netdev_features_t features)
190267d2656bSDiana Wang {
190367d2656bSDiana Wang if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
190467d2656bSDiana Wang (features & NETIF_F_HW_VLAN_STAG_RX)) {
190567d2656bSDiana Wang if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
190667d2656bSDiana Wang features &= ~NETIF_F_HW_VLAN_CTAG_RX;
190767d2656bSDiana Wang netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
190867d2656bSDiana Wang netdev_warn(netdev,
190967d2656bSDiana Wang "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
191067d2656bSDiana Wang } else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
191167d2656bSDiana Wang features &= ~NETIF_F_HW_VLAN_STAG_RX;
191267d2656bSDiana Wang netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
191367d2656bSDiana Wang netdev_warn(netdev,
191467d2656bSDiana Wang "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
191567d2656bSDiana Wang }
191667d2656bSDiana Wang }
191767d2656bSDiana Wang return features;
191867d2656bSDiana Wang }
191967d2656bSDiana Wang
/* nfp_net_features_check() - ndo_features_check handler
 *
 * Drop checksum/GSO offload feature bits for skbs whose layout the
 * datapath cannot handle on transmit: over-long inner headers for GSO,
 * and encapsulated packets that are not Ethernet-in-UDP (VXLAN-shaped)
 * or Ethernet-in-GRE tunnels.
 */
static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	/* Non-encapsulated packets need no further restriction */
	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		hdrlen = skb_inner_tcp_all_headers(skb);

		/* Assume worst case scenario of having longest possible
		 * metadata prepend - 8B
		 */
		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* Skip the tunnel-shape checks for xfrm (IPsec) offload packets */
	if (xfrm_offload(skb))
		return features;

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	/* Keep offloads only for Ethernet inner frames carried over UDP
	 * with an exactly VXLAN-sized UDP header, or over GRE.
	 */
	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
19704c352362SJakub Kicinski
197151c1df83SJakub Kicinski static int
nfp_net_get_phys_port_name(struct net_device * netdev,char * name,size_t len)197251c1df83SJakub Kicinski nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
197351c1df83SJakub Kicinski {
197451c1df83SJakub Kicinski struct nfp_net *nn = netdev_priv(netdev);
197551c1df83SJakub Kicinski int n;
197651c1df83SJakub Kicinski
1977f1fa719cSJiri Pirko /* If port is defined, devlink_port is registered and devlink core
1978f1fa719cSJiri Pirko * is taking care of name formatting.
1979f1fa719cSJiri Pirko */
198051c1df83SJakub Kicinski if (nn->port)
1981f1fa719cSJiri Pirko return -EOPNOTSUPP;
198251c1df83SJakub Kicinski
1983fe06a64eSJakub Kicinski if (nn->dp.is_vf || nn->vnic_no_name)
19846fd1cfc0SJakub Kicinski return -EOPNOTSUPP;
19856fd1cfc0SJakub Kicinski
1986ca145732SJakub Kicinski n = snprintf(name, len, "n%d", nn->id);
198751c1df83SJakub Kicinski if (n >= len)
198851c1df83SJakub Kicinski return -EINVAL;
198951c1df83SJakub Kicinski
199051c1df83SJakub Kicinski return 0;
199151c1df83SJakub Kicinski }
199251c1df83SJakub Kicinski
/* nfp_net_xdp_setup_drv() - install or remove a driver-mode XDP program
 *
 * If the attach state does not change (program present before and after,
 * or absent before and after), the program pointer can simply be swapped
 * without touching the rings.  Otherwise a cloned datapath config is
 * prepared with extra TX rings (one per RX ring when attaching),
 * bidirectional RX DMA mapping and adjusted RX headroom, and the rings
 * are reconfigured.
 */
static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;
	struct nfp_net_dp *dp;
	int err;

	/* Same attach state (both NULL or both non-NULL): swap the program
	 * pointer only, no ring reconfig needed.
	 */
	if (!prog == !nn->dp.xdp_prog) {
		WRITE_ONCE(nn->dp.xdp_prog, prog);
		xdp_attachment_setup(&nn->xdp, bpf);
		return 0;
	}

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->xdp_prog = prog;
	/* Attaching adds nn->dp.num_rx_rings TX rings, detaching removes them */
	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;

	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
	err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
	if (err)
		return err;

	xdp_attachment_setup(&nn->xdp, bpf);
	return 0;
}
20225f428401SJakub Kicinski
nfp_net_xdp_setup_hw(struct nfp_net * nn,struct netdev_bpf * bpf)20235f428401SJakub Kicinski static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
20245f428401SJakub Kicinski {
20255f428401SJakub Kicinski int err;
20265f428401SJakub Kicinski
20275f428401SJakub Kicinski err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
20285f428401SJakub Kicinski if (err)
2029cafa92acSJakub Kicinski return err;
20306a8ef542SJakub Kicinski
20315f428401SJakub Kicinski xdp_attachment_setup(&nn->xdp_hw, bpf);
2032ecd63a02SJakub Kicinski return 0;
2033ecd63a02SJakub Kicinski }
2034ecd63a02SJakub Kicinski
nfp_net_xdp(struct net_device * netdev,struct netdev_bpf * xdp)2035f4e63525SJakub Kicinski static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
2036ecd63a02SJakub Kicinski {
2037ecd63a02SJakub Kicinski struct nfp_net *nn = netdev_priv(netdev);
2038ecd63a02SJakub Kicinski
2039ecd63a02SJakub Kicinski switch (xdp->command) {
2040ecd63a02SJakub Kicinski case XDP_SETUP_PROG:
20415f428401SJakub Kicinski return nfp_net_xdp_setup_drv(nn, xdp);
2042cafa92acSJakub Kicinski case XDP_SETUP_PROG_HW:
20435f428401SJakub Kicinski return nfp_net_xdp_setup_hw(nn, xdp);
20446402528bSNiklas Söderlund case XDP_SETUP_XSK_POOL:
20456402528bSNiklas Söderlund return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
20466402528bSNiklas Söderlund xdp->xsk.queue_id);
2047ecd63a02SJakub Kicinski default:
2048af93d15aSJakub Kicinski return nfp_app_bpf(nn->app, nn, xdp);
2049ecd63a02SJakub Kicinski }
2050ecd63a02SJakub Kicinski }
2051ecd63a02SJakub Kicinski
nfp_net_set_mac_address(struct net_device * netdev,void * addr)20529d372759SPablo Cascón static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
20539d372759SPablo Cascón {
20549d372759SPablo Cascón struct nfp_net *nn = netdev_priv(netdev);
20559d372759SPablo Cascón struct sockaddr *saddr = addr;
20569d372759SPablo Cascón int err;
20579d372759SPablo Cascón
20589d372759SPablo Cascón err = eth_prepare_mac_addr_change(netdev, addr);
20599d372759SPablo Cascón if (err)
20609d372759SPablo Cascón return err;
20619d372759SPablo Cascón
20629d372759SPablo Cascón nfp_net_write_mac_addr(nn, saddr->sa_data);
20639d372759SPablo Cascón
20649d372759SPablo Cascón err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
20659d372759SPablo Cascón if (err)
20669d372759SPablo Cascón return err;
20679d372759SPablo Cascón
20689d372759SPablo Cascón eth_commit_mac_addr_change(netdev, addr);
20699d372759SPablo Cascón
20709d372759SPablo Cascón return 0;
20719d372759SPablo Cascón }
20729d372759SPablo Cascón
/* nfp_net_bridge_getlink() - ndo_bridge_getlink handler
 *
 * Report VEPA mode when the control word has the VEPA bit set,
 * VEB otherwise.  Only supported when the device advertises VEPA.
 */
static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct nfp_net *nn = netdev_priv(dev);
	u16 mode = BRIDGE_MODE_VEB;

	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
		return -EOPNOTSUPP;

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA)
		mode = BRIDGE_MODE_VEPA;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
				       nlflags, filter_mask, NULL);
}
2089be801411SYinjun Zhang
/* nfp_net_bridge_setlink() - ndo_bridge_setlink handler
 *
 * Switch the embedded switch between VEB and VEPA mode by toggling the
 * NFP_NET_CFG_CTRL_VEPA control bit.  Acts on the first IFLA_BRIDGE_MODE
 * attribute found in the IFLA_AF_SPEC nest and returns from inside the
 * loop; reaching the end of the nest without one is an error.
 */
static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				  u16 flags, struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, err;
	u32 new_ctrl;
	u16 mode;

	/* Mode can only be changed if the device advertises VEPA support */
	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		new_ctrl = nn->dp.ctrl;
		mode = nla_get_u16(attr);
		if (mode == BRIDGE_MODE_VEPA)
			new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
		else if (mode == BRIDGE_MODE_VEB)
			new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
		else
			return -EOPNOTSUPP;

		/* Nothing to do if the requested mode is already set */
		if (new_ctrl == nn->dp.ctrl)
			return 0;

		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
		err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
		/* Only cache the new control word if the device accepted it */
		if (!err)
			nn->dp.ctrl = new_ctrl;

		return err;
	}

	return -EINVAL;
}
2132be801411SYinjun Zhang
/* ndo callbacks for vNICs using the NFD3 datapath.
 * Kept largely in step with nfp_nfdk_netdev_ops below; this table
 * additionally wires up .ndo_xsk_wakeup for AF_XDP.
 */
const struct net_device_ops nfp_nfd3_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_fix_features	= nfp_net_fix_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_xsk_wakeup		= nfp_net_xsk_wakeup,
	.ndo_bridge_getlink     = nfp_net_bridge_getlink,
	.ndo_bridge_setlink     = nfp_net_bridge_setlink,
};
21634c352362SJakub Kicinski
/* ndo callbacks for vNICs using the NFDK datapath.
 * Mirrors nfp_nfd3_netdev_ops above, minus AF_XDP wakeup support.
 */
const struct net_device_ops nfp_nfdk_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_fix_features	= nfp_net_fix_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_bridge_getlink     = nfp_net_bridge_getlink,
	.ndo_bridge_setlink     = nfp_net_bridge_setlink,
};
2193c10d12e3SJakub Kicinski
nfp_udp_tunnel_sync(struct net_device * netdev,unsigned int table)2194641ca085SJakub Kicinski static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
2195641ca085SJakub Kicinski {
2196641ca085SJakub Kicinski struct nfp_net *nn = netdev_priv(netdev);
2197641ca085SJakub Kicinski int i;
2198641ca085SJakub Kicinski
2199641ca085SJakub Kicinski BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2200641ca085SJakub Kicinski for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2201641ca085SJakub Kicinski struct udp_tunnel_info ti0, ti1;
2202641ca085SJakub Kicinski
2203641ca085SJakub Kicinski udp_tunnel_nic_get_port(netdev, table, i, &ti0);
2204641ca085SJakub Kicinski udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
2205641ca085SJakub Kicinski
2206641ca085SJakub Kicinski nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
2207641ca085SJakub Kicinski be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
2208641ca085SJakub Kicinski }
2209641ca085SJakub Kicinski
2210641ca085SJakub Kicinski return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
2211641ca085SJakub Kicinski }
2212641ca085SJakub Kicinski
/* udp_tunnel_nic description: one table of NFP_NET_N_VXLAN_PORTS VXLAN
 * ports.  Sync callbacks may sleep (UDP_TUNNEL_NIC_INFO_MAY_SLEEP) and
 * are only invoked while the device is open
 * (UDP_TUNNEL_NIC_INFO_OPEN_ONLY).
 */
static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
	.sync_table     = nfp_udp_tunnel_sync,
	.flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables         = {
		{
			.n_entries      = NFP_NET_N_VXLAN_PORTS,
			.tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
		},
	},
};
2224641ca085SJakub Kicinski
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn: NFP Net device to report on
 *
 * Logs ring counts, firmware version, maximum MTU and the decoded
 * capability word(s) at probe time.
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->dp.is_vf ? "VF " : "",
		nn->dp.num_tx_rings, nn->max_tx_rings,
		nn->dp.num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.extend, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	/* One "%s" per capability below - keep the format string and the
	 * argument list in step when adding capability bits.
	 */
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXQINQ   ? "RXQINQ "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 "	: "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO1 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO2     ? "TSO2 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS1 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSS2 "     : "",
		nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXRWB    ? "TXRWB "    : "",
		nn->cap & NFP_NET_CFG_CTRL_VEPA     ? "VEPA "     : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "	  : "",
		nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
		"RXCSUM_COMPLETE " : "",
		nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
		nfp_app_extra_cap(nn->app, nn));
}
22704c352362SJakub Kicinski
22714c352362SJakub Kicinski /**
2272beba69caSJakub Kicinski * nfp_net_alloc() - Allocate netdev and related structure
22734c352362SJakub Kicinski * @pdev: PCI device
22749423d24bSJakub Kicinski * @dev_info: NFP ASIC params
2275e38f5d11SJakub Kicinski * @ctrl_bar: PCI IOMEM with vNIC config memory
2276a7b1ad08SJakub Kicinski * @needs_netdev: Whether to allocate a netdev for this vNIC
22774c352362SJakub Kicinski * @max_tx_rings: Maximum number of TX rings supported by device
22784c352362SJakub Kicinski * @max_rx_rings: Maximum number of RX rings supported by device
22794c352362SJakub Kicinski *
22804c352362SJakub Kicinski * This function allocates a netdev device and fills in the initial
2281a7b1ad08SJakub Kicinski * part of the @struct nfp_net structure. In case of control device
2282a7b1ad08SJakub Kicinski * nfp_net structure is allocated without the netdev.
22834c352362SJakub Kicinski *
22844c352362SJakub Kicinski * Return: NFP Net device structure, or ERR_PTR on error.
22854c352362SJakub Kicinski */
2286e38f5d11SJakub Kicinski struct nfp_net *
nfp_net_alloc(struct pci_dev * pdev,const struct nfp_dev_info * dev_info,void __iomem * ctrl_bar,bool needs_netdev,unsigned int max_tx_rings,unsigned int max_rx_rings)22879423d24bSJakub Kicinski nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
22889423d24bSJakub Kicinski void __iomem *ctrl_bar, bool needs_netdev,
2289e38f5d11SJakub Kicinski unsigned int max_tx_rings, unsigned int max_rx_rings)
22904c352362SJakub Kicinski {
22915f30671dSYinjun Zhang u64 dma_mask = dma_get_mask(&pdev->dev);
22924c352362SJakub Kicinski struct nfp_net *nn;
229383ec8857SJakub Kicinski int err;
22944c352362SJakub Kicinski
2295a7b1ad08SJakub Kicinski if (needs_netdev) {
2296a7b1ad08SJakub Kicinski struct net_device *netdev;
2297a7b1ad08SJakub Kicinski
22984c352362SJakub Kicinski netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
22994c352362SJakub Kicinski max_tx_rings, max_rx_rings);
23004c352362SJakub Kicinski if (!netdev)
23014c352362SJakub Kicinski return ERR_PTR(-ENOMEM);
23024c352362SJakub Kicinski
23034c352362SJakub Kicinski SET_NETDEV_DEV(netdev, &pdev->dev);
23044c352362SJakub Kicinski nn = netdev_priv(netdev);
230579c12a75SJakub Kicinski nn->dp.netdev = netdev;
2306a7b1ad08SJakub Kicinski } else {
2307a7b1ad08SJakub Kicinski nn = vzalloc(sizeof(*nn));
2308a7b1ad08SJakub Kicinski if (!nn)
2309a7b1ad08SJakub Kicinski return ERR_PTR(-ENOMEM);
2310a7b1ad08SJakub Kicinski }
2311a7b1ad08SJakub Kicinski
231279c12a75SJakub Kicinski nn->dp.dev = &pdev->dev;
2313e38f5d11SJakub Kicinski nn->dp.ctrl_bar = ctrl_bar;
23149423d24bSJakub Kicinski nn->dev_info = dev_info;
23154c352362SJakub Kicinski nn->pdev = pdev;
2316d9e3c299SJakub Kicinski nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
2317d9e3c299SJakub Kicinski
2318d9e3c299SJakub Kicinski switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
2319d9e3c299SJakub Kicinski case NFP_NET_CFG_VERSION_DP_NFD3:
23206fd86efaSJakub Kicinski nn->dp.ops = &nfp_nfd3_ops;
2321d9e3c299SJakub Kicinski break;
2322c10d12e3SJakub Kicinski case NFP_NET_CFG_VERSION_DP_NFDK:
2323c10d12e3SJakub Kicinski if (nn->fw_ver.major < 5) {
2324c10d12e3SJakub Kicinski dev_err(&pdev->dev,
2325c10d12e3SJakub Kicinski "NFDK must use ABI 5 or newer, found: %d\n",
2326c10d12e3SJakub Kicinski nn->fw_ver.major);
2327c10d12e3SJakub Kicinski err = -EINVAL;
2328c10d12e3SJakub Kicinski goto err_free_nn;
2329c10d12e3SJakub Kicinski }
2330c10d12e3SJakub Kicinski nn->dp.ops = &nfp_nfdk_ops;
2331c10d12e3SJakub Kicinski break;
2332d9e3c299SJakub Kicinski default:
2333d9e3c299SJakub Kicinski err = -EINVAL;
2334d9e3c299SJakub Kicinski goto err_free_nn;
2335d9e3c299SJakub Kicinski }
23364c352362SJakub Kicinski
23375f30671dSYinjun Zhang if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
23385f30671dSYinjun Zhang dev_err(&pdev->dev,
23395f30671dSYinjun Zhang "DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
23405f30671dSYinjun Zhang nn->dp.ops->dma_mask, dma_mask);
23415f30671dSYinjun Zhang err = -EINVAL;
23425f30671dSYinjun Zhang goto err_free_nn;
23435f30671dSYinjun Zhang }
23445f30671dSYinjun Zhang
23454c352362SJakub Kicinski nn->max_tx_rings = max_tx_rings;
23464c352362SJakub Kicinski nn->max_rx_rings = max_rx_rings;
23474c352362SJakub Kicinski
234879c12a75SJakub Kicinski nn->dp.num_tx_rings = min_t(unsigned int,
234979c12a75SJakub Kicinski max_tx_rings, num_online_cpus());
235079c12a75SJakub Kicinski nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
2351cbeaf7aaSJakub Kicinski netif_get_num_default_rss_queues());
23524c352362SJakub Kicinski
235379c12a75SJakub Kicinski nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
235479c12a75SJakub Kicinski nn->dp.num_r_vecs = min_t(unsigned int,
235579c12a75SJakub Kicinski nn->dp.num_r_vecs, num_online_cpus());
23566402528bSNiklas Söderlund nn->max_r_vecs = nn->dp.num_r_vecs;
23576402528bSNiklas Söderlund
23586402528bSNiklas Söderlund nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
23596402528bSNiklas Söderlund GFP_KERNEL);
23606402528bSNiklas Söderlund if (!nn->dp.xsk_pools) {
23616402528bSNiklas Söderlund err = -ENOMEM;
23626402528bSNiklas Söderlund goto err_free_nn;
23636402528bSNiklas Söderlund }
23644b27a1ebSJakub Kicinski
236579c12a75SJakub Kicinski nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
236679c12a75SJakub Kicinski nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
23674c352362SJakub Kicinski
23683ed77bf7SJakub Kicinski sema_init(&nn->bar_lock, 1);
2369dd5b2498SJakub Kicinski
23704c352362SJakub Kicinski spin_lock_init(&nn->reconfig_lock);
23714c352362SJakub Kicinski spin_lock_init(&nn->link_status_lock);
23724c352362SJakub Kicinski
23733248f77fSKees Cook timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
23743d780b92SJakub Kicinski
237583ec8857SJakub Kicinski err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
237683ec8857SJakub Kicinski &nn->tlv_caps);
237783ec8857SJakub Kicinski if (err)
237883ec8857SJakub Kicinski goto err_free_nn;
237983ec8857SJakub Kicinski
2380e2c7114aSJakub Kicinski err = nfp_ccm_mbox_alloc(nn);
2381e2c7114aSJakub Kicinski if (err)
2382e2c7114aSJakub Kicinski goto err_free_nn;
2383e2c7114aSJakub Kicinski
23844c352362SJakub Kicinski return nn;
238583ec8857SJakub Kicinski
238683ec8857SJakub Kicinski err_free_nn:
238783ec8857SJakub Kicinski if (nn->dp.netdev)
238883ec8857SJakub Kicinski free_netdev(nn->dp.netdev);
238983ec8857SJakub Kicinski else
239083ec8857SJakub Kicinski vfree(nn);
239183ec8857SJakub Kicinski return ERR_PTR(err);
23924c352362SJakub Kicinski }
23934c352362SJakub Kicinski
/**
 * nfp_net_free() - Undo what nfp_net_alloc() did
 * @nn:	NFP Net device to free
 *
 * Releases the vNIC structure, the CCM mailbox and the AF_XDP pool
 * array.  No reconfiguration work may be pending at this point.
 */
void nfp_net_free(struct nfp_net *nn)
{
	/* Catch teardown-ordering bugs: async reconfig must be finished */
	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
	nfp_ccm_mbox_free(nn);

	kfree(nn->dp.xsk_pools);
	/* Data vNICs are tied to a netdev (freed via free_netdev()),
	 * control vNICs were presumably vmalloc()ed by nfp_net_alloc()
	 * directly -- mirrors the error path seen in the allocator.
	 */
	if (nn->dp.netdev)
		free_netdev(nn->dp.netdev);
	else
		vfree(nn);
}
24094c352362SJakub Kicinski
24104c352362SJakub Kicinski /**
24119ff304bfSJakub Kicinski * nfp_net_rss_key_sz() - Get current size of the RSS key
24129ff304bfSJakub Kicinski * @nn: NFP Net device instance
24139ff304bfSJakub Kicinski *
24149ff304bfSJakub Kicinski * Return: size of the RSS key for currently selected hash function.
24159ff304bfSJakub Kicinski */
nfp_net_rss_key_sz(struct nfp_net * nn)24169ff304bfSJakub Kicinski unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
24179ff304bfSJakub Kicinski {
24189ff304bfSJakub Kicinski switch (nn->rss_hfunc) {
24199ff304bfSJakub Kicinski case ETH_RSS_HASH_TOP:
24209ff304bfSJakub Kicinski return NFP_NET_CFG_RSS_KEY_SZ;
24219ff304bfSJakub Kicinski case ETH_RSS_HASH_XOR:
24229ff304bfSJakub Kicinski return 0;
24239ff304bfSJakub Kicinski case ETH_RSS_HASH_CRC32:
24249ff304bfSJakub Kicinski return 4;
24259ff304bfSJakub Kicinski }
24269ff304bfSJakub Kicinski
24279ff304bfSJakub Kicinski nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
24289ff304bfSJakub Kicinski return 0;
24299ff304bfSJakub Kicinski }
24309ff304bfSJakub Kicinski
24319ff304bfSJakub Kicinski /**
24324c352362SJakub Kicinski * nfp_net_rss_init() - Set the initial RSS parameters
24334c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
24344c352362SJakub Kicinski */
nfp_net_rss_init(struct nfp_net * nn)24354c352362SJakub Kicinski static void nfp_net_rss_init(struct nfp_net *nn)
24364c352362SJakub Kicinski {
24379ff304bfSJakub Kicinski unsigned long func_bit, rss_cap_hfunc;
24389ff304bfSJakub Kicinski u32 reg;
24399ff304bfSJakub Kicinski
24409ff304bfSJakub Kicinski /* Read the RSS function capability and select first supported func */
24419ff304bfSJakub Kicinski reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
24429ff304bfSJakub Kicinski rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
24439ff304bfSJakub Kicinski if (!rss_cap_hfunc)
24449ff304bfSJakub Kicinski rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
24459ff304bfSJakub Kicinski NFP_NET_CFG_RSS_TOEPLITZ);
24469ff304bfSJakub Kicinski
24479ff304bfSJakub Kicinski func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
24489ff304bfSJakub Kicinski if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
244979c12a75SJakub Kicinski dev_warn(nn->dp.dev,
24509ff304bfSJakub Kicinski "Bad RSS config, defaulting to Toeplitz hash\n");
24519ff304bfSJakub Kicinski func_bit = ETH_RSS_HASH_TOP_BIT;
24529ff304bfSJakub Kicinski }
24539ff304bfSJakub Kicinski nn->rss_hfunc = 1 << func_bit;
24549ff304bfSJakub Kicinski
24559ff304bfSJakub Kicinski netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
24564c352362SJakub Kicinski
24571e9e10d0SJakub Kicinski nfp_net_rss_init_itbl(nn);
24584c352362SJakub Kicinski
24594c352362SJakub Kicinski /* Enable IPv4/IPv6 TCP by default */
24604c352362SJakub Kicinski nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
24614c352362SJakub Kicinski NFP_NET_CFG_RSS_IPV6_TCP |
246257910a47SJaco Coetzee NFP_NET_CFG_RSS_IPV4_UDP |
246357910a47SJaco Coetzee NFP_NET_CFG_RSS_IPV6_UDP |
24649ff304bfSJakub Kicinski FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
24654c352362SJakub Kicinski NFP_NET_CFG_RSS_MASK;
24664c352362SJakub Kicinski }
24674c352362SJakub Kicinski
24684c352362SJakub Kicinski /**
24694c352362SJakub Kicinski * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
24704c352362SJakub Kicinski * @nn: NFP Net device to reconfigure
24714c352362SJakub Kicinski */
nfp_net_irqmod_init(struct nfp_net * nn)24724c352362SJakub Kicinski static void nfp_net_irqmod_init(struct nfp_net *nn)
24734c352362SJakub Kicinski {
24744c352362SJakub Kicinski nn->rx_coalesce_usecs = 50;
24754c352362SJakub Kicinski nn->rx_coalesce_max_frames = 64;
24764c352362SJakub Kicinski nn->tx_coalesce_usecs = 50;
24774c352362SJakub Kicinski nn->tx_coalesce_max_frames = 64;
24789d32e4e7SYinjun Zhang
24799d32e4e7SYinjun Zhang nn->rx_coalesce_adapt_on = true;
24809d32e4e7SYinjun Zhang nn->tx_coalesce_adapt_on = true;
24814c352362SJakub Kicinski }
24824c352362SJakub Kicinski
/* Finalise the netdev for a data vNIC: program the MAC address, apply the
 * negotiated MTU, translate device capability bits (nn->cap/nn->cap_w1)
 * into netdev feature flags and enabled-control bits (nn->dp.ctrl), and
 * install the datapath-specific ndo/ethtool ops.  The statement order
 * below is significant -- see the inline comments.
 */
static void nfp_net_netdev_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;

	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	netdev->mtu = nn->dp.mtu;

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported. By default we enable most features.
	 */
	if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	/* LSO(v1) is only used with ABI versions > 2; LSO2 has no such
	 * restriction and is preferred when both are advertised (?:).
	 */
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    nn->cap & NFP_NET_CFG_CTRL_LSO2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
					 NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;

#ifdef CONFIG_NFP_NET_IPSEC
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC)
		netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
#endif

	/* Tunnel offloads: UDP-tunnel segmentation additionally needs LSO */
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
					       NETIF_F_GSO_PARTIAL;
			netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
		netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
	}
	if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	/* Snapshot taken BEFORE the VLAN offload bits are added below, so
	 * vlan_features only contains the checksum/SG/TSO style offloads.
	 */
	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
			       NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
		/* TXVLAN and LSO2 are mutually exclusive on this device */
		if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
			nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		} else {
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
			nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
				       NFP_NET_CFG_CTRL_TXVLAN;
		}
	}
	if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
	}

	/* Enable everything advertised so far ... */
	netdev->features = netdev->hw_features;

	/* ... except HW_TC, which is advertised but not enabled by default */
	if (nfp_app_has_tc(nn->app) && nn->port)
		netdev->hw_features |= NETIF_F_HW_TC;

	/* C-Tag strip and S-Tag strip can't be supported simultaneously,
	 * so enable C-Tag strip and disable S-Tag strip by default.
	 */
	netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;

	/* All datapaths do plain XDP; HW offload only with the BPF app */
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
	if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
		netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;

	/* Finalise the netdev setup: pick the ndo ops matching the NFD
	 * datapath generation; only NFD3 adds zero-copy/redirect XDP.
	 */
	switch (nn->dp.ops->version) {
	case NFP_NFD_VER_NFD3:
		netdev->netdev_ops = &nfp_nfd3_netdev_ops;
		netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
		netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
		break;
	case NFP_NFD_VER_NFDK:
		netdev->netdev_ops = &nfp_nfdk_netdev_ops;
		break;
	}

	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = nn->max_mtu;

	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);

	/* Start with carrier off; link state is reported asynchronously */
	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
}
2609a7b1ad08SJakub Kicinski
nfp_net_read_caps(struct nfp_net * nn)2610545bfa7aSJakub Kicinski static int nfp_net_read_caps(struct nfp_net *nn)
2611a7b1ad08SJakub Kicinski {
2612a7b1ad08SJakub Kicinski /* Get some of the read-only fields from the BAR */
2613a7b1ad08SJakub Kicinski nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
26141b0c84a3SHuanhuan Wang nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1);
2615a7b1ad08SJakub Kicinski nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
2616a7b1ad08SJakub Kicinski
261764a919a9SJakub Kicinski /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
261864a919a9SJakub Kicinski * we allow use of non-chained metadata if RSS(v1) is the only
261964a919a9SJakub Kicinski * advertised capability requiring metadata.
262064a919a9SJakub Kicinski */
2621a7b1ad08SJakub Kicinski nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
262277ece8d5SJakub Kicinski !nn->dp.netdev ||
262364a919a9SJakub Kicinski !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
2624a7b1ad08SJakub Kicinski nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
262564a919a9SJakub Kicinski /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
262664a919a9SJakub Kicinski * it has the same meaning as RSSv2.
262764a919a9SJakub Kicinski */
2628a7b1ad08SJakub Kicinski if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
2629a7b1ad08SJakub Kicinski nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
2630a7b1ad08SJakub Kicinski
2631a7b1ad08SJakub Kicinski /* Determine RX packet/metadata boundary offset */
2632a7b1ad08SJakub Kicinski if (nn->fw_ver.major >= 2) {
2633a7b1ad08SJakub Kicinski u32 reg;
2634a7b1ad08SJakub Kicinski
2635a7b1ad08SJakub Kicinski reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
2636a7b1ad08SJakub Kicinski if (reg > NFP_NET_MAX_PREPEND) {
2637a7b1ad08SJakub Kicinski nn_err(nn, "Invalid rx offset: %d\n", reg);
2638a7b1ad08SJakub Kicinski return -EINVAL;
2639a7b1ad08SJakub Kicinski }
2640a7b1ad08SJakub Kicinski nn->dp.rx_offset = reg;
2641a7b1ad08SJakub Kicinski } else {
2642a7b1ad08SJakub Kicinski nn->dp.rx_offset = NFP_NET_RX_OFFSET;
2643a7b1ad08SJakub Kicinski }
2644a7b1ad08SJakub Kicinski
2645b94b6a13SJakub Kicinski /* Mask out NFD-version-specific features */
2646b94b6a13SJakub Kicinski nn->cap &= nn->dp.ops->cap_mask;
2647b94b6a13SJakub Kicinski
264878a0a65fSJakub Kicinski /* For control vNICs mask out the capabilities app doesn't want. */
264978a0a65fSJakub Kicinski if (!nn->dp.netdev)
265078a0a65fSJakub Kicinski nn->cap &= nn->app->type->ctrl_cap_mask;
265178a0a65fSJakub Kicinski
2652545bfa7aSJakub Kicinski return 0;
2653545bfa7aSJakub Kicinski }
2654545bfa7aSJakub Kicinski
/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:	NFP Net device structure
 *
 * Reads device capabilities, picks default MTU and control bits, resets
 * the FW-visible enable state, and for data vNICs (those with a netdev)
 * initialises the mailbox/TLS/IPsec helpers and registers the netdev.
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_init(struct nfp_net *nn)
{
	int err;

	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

	err = nfp_net_read_caps(nn);
	if (err)
		return err;

	/* Set default MTU and Freelist buffer size */
	if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
		/* Control vNIC: honour the app's requested MTU, capped by HW */
		nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
	} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
		nn->dp.mtu = nn->max_mtu;
	} else {
		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
	}
	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

	if (nfp_app_ctrl_uses_data_vnics(nn->app))
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;

	/* Prefer RSSv2 over RSS(v1) when both are advertised (?:) */
	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_init(nn);
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
					 NFP_NET_CFG_CTRL_RSS;
	}

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Enable TX pointer writeback, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;

	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
		nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER;

	/* Stash the re-configuration queue away. First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* netdev-only init: control vNICs skip all of this */
	if (nn->dp.netdev) {
		nfp_net_netdev_init(nn);

		err = nfp_ccm_mbox_init(nn);
		if (err)
			return err;

		err = nfp_net_tls_init(nn);
		if (err)
			goto err_clean_mbox;

		nfp_net_ipsec_init(nn);
	}

	nfp_net_vecs_init(nn);

	if (!nn->dp.netdev)
		return 0;

	spin_lock_init(&nn->mbox_amsg.lock);
	INIT_LIST_HEAD(&nn->mbox_amsg.list);
	INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);

	/* NOTE(review): if register_netdev() fails, mbox/tls/ipsec state is
	 * left initialised here -- presumably undone by the caller's error
	 * path via nfp_net_free(); confirm against the callers.
	 */
	return register_netdev(nn->dp.netdev);

err_clean_mbox:
	nfp_ccm_mbox_clean(nn);
	return err;
}
27494c352362SJakub Kicinski
27504c352362SJakub Kicinski /**
2751beba69caSJakub Kicinski * nfp_net_clean() - Undo what nfp_net_init() did.
2752beba69caSJakub Kicinski * @nn: NFP Net device structure
27534c352362SJakub Kicinski */
nfp_net_clean(struct nfp_net * nn)2754beba69caSJakub Kicinski void nfp_net_clean(struct nfp_net *nn)
27554c352362SJakub Kicinski {
2756a7b1ad08SJakub Kicinski if (!nn->dp.netdev)
2757a7b1ad08SJakub Kicinski return;
2758a7b1ad08SJakub Kicinski
27596f14f443SDavid S. Miller unregister_netdev(nn->dp.netdev);
276057f273adSHuanhuan Wang nfp_net_ipsec_clean(nn);
2761e2c7114aSJakub Kicinski nfp_ccm_mbox_clean(nn);
276271f814cdSYinjun Zhang flush_work(&nn->mbox_amsg.work);
27639ad716b9SJakub Kicinski nfp_net_reconfig_wait_posted(nn);
27644c352362SJakub Kicinski }
2765