xref: /openbmc/linux/drivers/net/hyperv/netvsc_bpf.c (revision 1cb9d3b6)
1351e1581SHaiyang Zhang // SPDX-License-Identifier: GPL-2.0-only
2351e1581SHaiyang Zhang /* Copyright (c) 2019, Microsoft Corporation.
3351e1581SHaiyang Zhang  *
4351e1581SHaiyang Zhang  * Author:
5351e1581SHaiyang Zhang  *   Haiyang Zhang <haiyangz@microsoft.com>
6351e1581SHaiyang Zhang  */
7351e1581SHaiyang Zhang 
8351e1581SHaiyang Zhang #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9351e1581SHaiyang Zhang 
10351e1581SHaiyang Zhang #include <linux/netdevice.h>
11351e1581SHaiyang Zhang #include <linux/etherdevice.h>
12351e1581SHaiyang Zhang #include <linux/ethtool.h>
13*1cb9d3b6SHaiyang Zhang #include <linux/netpoll.h>
14351e1581SHaiyang Zhang #include <linux/bpf.h>
15351e1581SHaiyang Zhang #include <linux/bpf_trace.h>
16351e1581SHaiyang Zhang #include <linux/kernel.h>
17351e1581SHaiyang Zhang #include <net/xdp.h>
18351e1581SHaiyang Zhang 
19351e1581SHaiyang Zhang #include <linux/mutex.h>
20351e1581SHaiyang Zhang #include <linux/rtnetlink.h>
21351e1581SHaiyang Zhang 
22351e1581SHaiyang Zhang #include "hyperv_net.h"
23351e1581SHaiyang Zhang 
/* Run the per-channel XDP program (if one is attached) on the first
 * fragment of a received packet.
 *
 * The packet bytes are copied into a freshly allocated page so the BPF
 * program may rewrite them freely.  On XDP_PASS/XDP_TX the page is handed
 * to the caller through xdp->data_hard_start; on any dropping verdict it
 * is freed here and xdp->data_hard_start is reset to NULL.
 *
 * Returns the XDP verdict (XDP_PASS when no program is attached).
 */
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
		   struct xdp_buff *xdp)
{
	struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
	void *data = nvchan->rsc.data[0];
	u32 len = nvchan->rsc.len[0];
	struct page *page = NULL;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;
	/* Assume the page must be freed until a verdict transfers ownership. */
	bool drop = true;

	xdp->data_hard_start = NULL;

	rcu_read_lock();
	prog = rcu_dereference(nvchan->bpf_prog);

	if (!prog)
		goto out;

	/* Ensure that the below memcpy() won't overflow the page buffer. */
	if (len > ndev->mtu + ETH_HLEN) {
		act = XDP_DROP;
		goto out;
	}

	/* allocate page buffer for data */
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		act = XDP_DROP;
		goto out;
	}

	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);

	memcpy(xdp->data, data, len);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
		/* Caller consumes the page via xdp->data_hard_start. */
		drop = false;
		break;

	case XDP_DROP:
		break;

	case XDP_REDIRECT:
		if (!xdp_do_redirect(ndev, xdp, prog)) {
			/* Page ownership moved to the redirect target;
			 * the actual flush is deferred (see xdp_flush use
			 * by the channel poll path).
			 */
			nvchan->xdp_flush = true;
			drop = false;

			u64_stats_update_begin(&rx_stats->syncp);

			rx_stats->xdp_redirect++;
			rx_stats->packets++;
			rx_stats->bytes += nvchan->rsc.pktlen;

			u64_stats_update_end(&rx_stats->syncp);

			break;
		} else {
			/* Redirect failed: count it and treat as aborted. */
			u64_stats_update_begin(&rx_stats->syncp);
			rx_stats->xdp_drop++;
			u64_stats_update_end(&rx_stats->syncp);
		}

		fallthrough;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

	if (page && drop) {
		__free_page(page);
		xdp->data_hard_start = NULL;
	}

	return act;
}
112351e1581SHaiyang Zhang 
netvsc_xdp_fraglen(unsigned int len)113351e1581SHaiyang Zhang unsigned int netvsc_xdp_fraglen(unsigned int len)
114351e1581SHaiyang Zhang {
115351e1581SHaiyang Zhang 	return SKB_DATA_ALIGN(len) +
116351e1581SHaiyang Zhang 	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
117351e1581SHaiyang Zhang }
118351e1581SHaiyang Zhang 
netvsc_xdp_get(struct netvsc_device * nvdev)119351e1581SHaiyang Zhang struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
120351e1581SHaiyang Zhang {
121351e1581SHaiyang Zhang 	return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
122351e1581SHaiyang Zhang }
123351e1581SHaiyang Zhang 
netvsc_xdp_set(struct net_device * dev,struct bpf_prog * prog,struct netlink_ext_ack * extack,struct netvsc_device * nvdev)124351e1581SHaiyang Zhang int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
125351e1581SHaiyang Zhang 		   struct netlink_ext_ack *extack,
126351e1581SHaiyang Zhang 		   struct netvsc_device *nvdev)
127351e1581SHaiyang Zhang {
128351e1581SHaiyang Zhang 	struct bpf_prog *old_prog;
129351e1581SHaiyang Zhang 	int buf_max, i;
130351e1581SHaiyang Zhang 
131351e1581SHaiyang Zhang 	old_prog = netvsc_xdp_get(nvdev);
132351e1581SHaiyang Zhang 
133351e1581SHaiyang Zhang 	if (!old_prog && !prog)
134351e1581SHaiyang Zhang 		return 0;
135351e1581SHaiyang Zhang 
136351e1581SHaiyang Zhang 	buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
137351e1581SHaiyang Zhang 	if (prog && buf_max > PAGE_SIZE) {
138351e1581SHaiyang Zhang 		netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
139351e1581SHaiyang Zhang 			   dev->mtu, buf_max);
140351e1581SHaiyang Zhang 		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
141351e1581SHaiyang Zhang 
142351e1581SHaiyang Zhang 		return -EOPNOTSUPP;
143351e1581SHaiyang Zhang 	}
144351e1581SHaiyang Zhang 
145351e1581SHaiyang Zhang 	if (prog && (dev->features & NETIF_F_LRO)) {
146351e1581SHaiyang Zhang 		netdev_err(dev, "XDP: not support LRO\n");
147351e1581SHaiyang Zhang 		NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO");
148351e1581SHaiyang Zhang 
149351e1581SHaiyang Zhang 		return -EOPNOTSUPP;
150351e1581SHaiyang Zhang 	}
151351e1581SHaiyang Zhang 
152351e1581SHaiyang Zhang 	if (prog)
153184367dcSHaiyang Zhang 		bpf_prog_add(prog, nvdev->num_chn - 1);
154351e1581SHaiyang Zhang 
155351e1581SHaiyang Zhang 	for (i = 0; i < nvdev->num_chn; i++)
156351e1581SHaiyang Zhang 		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
157351e1581SHaiyang Zhang 
158351e1581SHaiyang Zhang 	if (old_prog)
159351e1581SHaiyang Zhang 		for (i = 0; i < nvdev->num_chn; i++)
160351e1581SHaiyang Zhang 			bpf_prog_put(old_prog);
161351e1581SHaiyang Zhang 
162351e1581SHaiyang Zhang 	return 0;
163351e1581SHaiyang Zhang }
164351e1581SHaiyang Zhang 
netvsc_vf_setxdp(struct net_device * vf_netdev,struct bpf_prog * prog)165351e1581SHaiyang Zhang int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
166351e1581SHaiyang Zhang {
167351e1581SHaiyang Zhang 	struct netdev_bpf xdp;
168184367dcSHaiyang Zhang 	int ret;
169351e1581SHaiyang Zhang 
170351e1581SHaiyang Zhang 	ASSERT_RTNL();
171351e1581SHaiyang Zhang 
172351e1581SHaiyang Zhang 	if (!vf_netdev)
173351e1581SHaiyang Zhang 		return 0;
174351e1581SHaiyang Zhang 
175e416531fSJakub Kicinski 	if (!vf_netdev->netdev_ops->ndo_bpf)
176351e1581SHaiyang Zhang 		return 0;
177351e1581SHaiyang Zhang 
178351e1581SHaiyang Zhang 	memset(&xdp, 0, sizeof(xdp));
179351e1581SHaiyang Zhang 
180184367dcSHaiyang Zhang 	if (prog)
181184367dcSHaiyang Zhang 		bpf_prog_inc(prog);
182184367dcSHaiyang Zhang 
183351e1581SHaiyang Zhang 	xdp.command = XDP_SETUP_PROG;
184351e1581SHaiyang Zhang 	xdp.prog = prog;
185351e1581SHaiyang Zhang 
186e416531fSJakub Kicinski 	ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
187184367dcSHaiyang Zhang 
188184367dcSHaiyang Zhang 	if (ret && prog)
189184367dcSHaiyang Zhang 		bpf_prog_put(prog);
190184367dcSHaiyang Zhang 
191184367dcSHaiyang Zhang 	return ret;
192351e1581SHaiyang Zhang }
193351e1581SHaiyang Zhang 
netvsc_bpf(struct net_device * dev,struct netdev_bpf * bpf)194351e1581SHaiyang Zhang int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
195351e1581SHaiyang Zhang {
196351e1581SHaiyang Zhang 	struct net_device_context *ndevctx = netdev_priv(dev);
197351e1581SHaiyang Zhang 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
198351e1581SHaiyang Zhang 	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
199351e1581SHaiyang Zhang 	struct netlink_ext_ack *extack = bpf->extack;
200351e1581SHaiyang Zhang 	int ret;
201351e1581SHaiyang Zhang 
202351e1581SHaiyang Zhang 	if (!nvdev || nvdev->destroy) {
203351e1581SHaiyang Zhang 		return -ENODEV;
204351e1581SHaiyang Zhang 	}
205351e1581SHaiyang Zhang 
206351e1581SHaiyang Zhang 	switch (bpf->command) {
207351e1581SHaiyang Zhang 	case XDP_SETUP_PROG:
208351e1581SHaiyang Zhang 		ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);
209351e1581SHaiyang Zhang 
210351e1581SHaiyang Zhang 		if (ret)
211351e1581SHaiyang Zhang 			return ret;
212351e1581SHaiyang Zhang 
213351e1581SHaiyang Zhang 		ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
214351e1581SHaiyang Zhang 
215351e1581SHaiyang Zhang 		if (ret) {
216351e1581SHaiyang Zhang 			netdev_err(dev, "vf_setxdp failed:%d\n", ret);
217351e1581SHaiyang Zhang 			NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");
218351e1581SHaiyang Zhang 
219351e1581SHaiyang Zhang 			netvsc_xdp_set(dev, NULL, extack, nvdev);
220351e1581SHaiyang Zhang 		}
221351e1581SHaiyang Zhang 
222351e1581SHaiyang Zhang 		return ret;
223351e1581SHaiyang Zhang 
224351e1581SHaiyang Zhang 	default:
225351e1581SHaiyang Zhang 		return -EINVAL;
226351e1581SHaiyang Zhang 	}
227351e1581SHaiyang Zhang }
228*1cb9d3b6SHaiyang Zhang 
/* Convert one redirected XDP frame into an skb and queue it for
 * transmission on queue @q_idx of the synthetic data path.
 *
 * Returns 0 on success, -ENOMEM when no skb could be built.
 */
static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
				 struct xdp_frame *frame, u16 q_idx)
{
	struct sk_buff *skb = xdp_build_skb_from_frame(frame, ndev);

	if (unlikely(!skb))
		return -ENOMEM;

	netvsc_get_hash(skb, netdev_priv(ndev));
	skb_record_rx_queue(skb, q_idx);
	netvsc_xdp_xmit(skb, ndev);

	return 0;
}
246*1cb9d3b6SHaiyang Zhang 
/* ndo_xdp_xmit handler: transmit up to @n redirected XDP frames.
 *
 * Runs in the ndo_xdp_xmit context, i.e. with BH disabled (the RCU
 * dereferences below use the _bh flavor).  If a usable VF is bound and
 * is the active data path, the whole batch is delegated to the VF's
 * ndo_xdp_xmit; otherwise each frame is converted to an skb and sent on
 * the synthetic path.
 *
 * Returns the number of frames accepted (0 when the device is gone);
 * the caller owns and must free any frames not accepted.
 */
int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
		       struct xdp_frame **frames, u32 flags)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	const struct net_device_ops *vf_ops;
	struct netvsc_stats_tx *tx_stats;
	struct netvsc_device *nvsc_dev;
	struct net_device *vf_netdev;
	int i, count = 0;
	u16 q_idx;

	/* Don't transmit if netvsc_device is gone */
	nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
	if (unlikely(!nvsc_dev || nvsc_dev->destroy))
		return 0;

	/* If VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then VF can not be used either.
	 */
	vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
	    vf_netdev->netdev_ops->ndo_xdp_xmit &&
	    ndev_ctx->data_path_is_vf) {
		vf_ops = vf_netdev->netdev_ops;
		return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
	}

	/* Pick a TX queue based on the executing CPU. */
	q_idx = smp_processor_id() % ndev->real_num_tx_queues;

	/* Stop at the first frame that fails to convert; the remaining
	 * frames stay owned by the caller.
	 */
	for (i = 0; i < n; i++) {
		if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
			break;

		count++;
	}

	tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->xdp_xmit += count;
	u64_stats_update_end(&tx_stats->syncp);

	return count;
}
293