xref: /openbmc/linux/net/bpf/test_run.c (revision b2ca4e1cfa7d3d755e1ec637d1235f89af9bd01f)
125763b3cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21cf1cae9SAlexei Starovoitov /* Copyright (c) 2017 Facebook
31cf1cae9SAlexei Starovoitov  */
41cf1cae9SAlexei Starovoitov #include <linux/bpf.h>
51cf1cae9SAlexei Starovoitov #include <linux/slab.h>
61cf1cae9SAlexei Starovoitov #include <linux/vmalloc.h>
71cf1cae9SAlexei Starovoitov #include <linux/etherdevice.h>
81cf1cae9SAlexei Starovoitov #include <linux/filter.h>
91cf1cae9SAlexei Starovoitov #include <linux/sched/signal.h>
106ac99e8fSMartin KaFai Lau #include <net/bpf_sk_storage.h>
112cb494a3SSong Liu #include <net/sock.h>
122cb494a3SSong Liu #include <net/tcp.h>
131cf1cae9SAlexei Starovoitov 
14e950e843SMatt Mullins #define CREATE_TRACE_POINTS
15e950e843SMatt Mullins #include <trace/events/bpf_test_run.h>
16e950e843SMatt Mullins 
17df1a2cb7SStanislav Fomichev static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
18df1a2cb7SStanislav Fomichev 			u32 *retval, u32 *time)
191cf1cae9SAlexei Starovoitov {
2071b91a50SBo YU 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
218bad74f9SRoman Gushchin 	enum bpf_cgroup_storage_type stype;
221cf1cae9SAlexei Starovoitov 	u64 time_start, time_spent = 0;
23df1a2cb7SStanislav Fomichev 	int ret = 0;
24dcb40590SRoman Gushchin 	u32 i;
251cf1cae9SAlexei Starovoitov 
268bad74f9SRoman Gushchin 	for_each_cgroup_storage_type(stype) {
278bad74f9SRoman Gushchin 		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
288bad74f9SRoman Gushchin 		if (IS_ERR(storage[stype])) {
298bad74f9SRoman Gushchin 			storage[stype] = NULL;
308bad74f9SRoman Gushchin 			for_each_cgroup_storage_type(stype)
318bad74f9SRoman Gushchin 				bpf_cgroup_storage_free(storage[stype]);
328bad74f9SRoman Gushchin 			return -ENOMEM;
338bad74f9SRoman Gushchin 		}
348bad74f9SRoman Gushchin 	}
35f42ee093SRoman Gushchin 
361cf1cae9SAlexei Starovoitov 	if (!repeat)
371cf1cae9SAlexei Starovoitov 		repeat = 1;
38df1a2cb7SStanislav Fomichev 
39df1a2cb7SStanislav Fomichev 	rcu_read_lock();
40df1a2cb7SStanislav Fomichev 	preempt_disable();
411cf1cae9SAlexei Starovoitov 	time_start = ktime_get_ns();
421cf1cae9SAlexei Starovoitov 	for (i = 0; i < repeat; i++) {
43df1a2cb7SStanislav Fomichev 		bpf_cgroup_storage_set(storage);
44df1a2cb7SStanislav Fomichev 		*retval = BPF_PROG_RUN(prog, ctx);
45df1a2cb7SStanislav Fomichev 
46df1a2cb7SStanislav Fomichev 		if (signal_pending(current)) {
47df1a2cb7SStanislav Fomichev 			ret = -EINTR;
481cf1cae9SAlexei Starovoitov 			break;
49df1a2cb7SStanislav Fomichev 		}
50df1a2cb7SStanislav Fomichev 
51df1a2cb7SStanislav Fomichev 		if (need_resched()) {
521cf1cae9SAlexei Starovoitov 			time_spent += ktime_get_ns() - time_start;
53df1a2cb7SStanislav Fomichev 			preempt_enable();
54df1a2cb7SStanislav Fomichev 			rcu_read_unlock();
55df1a2cb7SStanislav Fomichev 
561cf1cae9SAlexei Starovoitov 			cond_resched();
57df1a2cb7SStanislav Fomichev 
58df1a2cb7SStanislav Fomichev 			rcu_read_lock();
59df1a2cb7SStanislav Fomichev 			preempt_disable();
601cf1cae9SAlexei Starovoitov 			time_start = ktime_get_ns();
611cf1cae9SAlexei Starovoitov 		}
621cf1cae9SAlexei Starovoitov 	}
631cf1cae9SAlexei Starovoitov 	time_spent += ktime_get_ns() - time_start;
64df1a2cb7SStanislav Fomichev 	preempt_enable();
65df1a2cb7SStanislav Fomichev 	rcu_read_unlock();
66df1a2cb7SStanislav Fomichev 
671cf1cae9SAlexei Starovoitov 	do_div(time_spent, repeat);
681cf1cae9SAlexei Starovoitov 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
691cf1cae9SAlexei Starovoitov 
708bad74f9SRoman Gushchin 	for_each_cgroup_storage_type(stype)
718bad74f9SRoman Gushchin 		bpf_cgroup_storage_free(storage[stype]);
72f42ee093SRoman Gushchin 
73df1a2cb7SStanislav Fomichev 	return ret;
741cf1cae9SAlexei Starovoitov }
751cf1cae9SAlexei Starovoitov 
/* Copy the results of a test run back to user space: the (possibly
 * truncated) output packet data, its full size, the program's return value
 * and the measured duration.  Returns 0 on success, -ENOSPC if the user's
 * data_size_out hint was smaller than the produced data (data is truncated
 * but size/retval/duration are still reported), or -EFAULT on a failed
 * user copy.
 */
7678e52272SDavid Miller static int bpf_test_finish(const union bpf_attr *kattr,
7778e52272SDavid Miller 			   union bpf_attr __user *uattr, const void *data,
781cf1cae9SAlexei Starovoitov 			   u32 size, u32 retval, u32 duration)
791cf1cae9SAlexei Starovoitov {
8078e52272SDavid Miller 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
811cf1cae9SAlexei Starovoitov 	int err = -EFAULT;
82b5a36b1eSLorenz Bauer 	u32 copy_size = size;
831cf1cae9SAlexei Starovoitov 
84b5a36b1eSLorenz Bauer 	/* Clamp copy if the user has provided a size hint, but copy the full
85b5a36b1eSLorenz Bauer 	 * buffer if not to retain old behaviour.
86b5a36b1eSLorenz Bauer 	 */
87b5a36b1eSLorenz Bauer 	if (kattr->test.data_size_out &&
88b5a36b1eSLorenz Bauer 	    copy_size > kattr->test.data_size_out) {
89b5a36b1eSLorenz Bauer 		copy_size = kattr->test.data_size_out;
90b5a36b1eSLorenz Bauer 		err = -ENOSPC;
91b5a36b1eSLorenz Bauer 	}
92b5a36b1eSLorenz Bauer 
	/* data_out is optional; the untruncated size is always reported so
	 * the caller can detect truncation.
	 */
93b5a36b1eSLorenz Bauer 	if (data_out && copy_to_user(data_out, data, copy_size))
941cf1cae9SAlexei Starovoitov 		goto out;
951cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
961cf1cae9SAlexei Starovoitov 		goto out;
971cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
981cf1cae9SAlexei Starovoitov 		goto out;
991cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
1001cf1cae9SAlexei Starovoitov 		goto out;
	/* Preserve -ENOSPC from the clamp above; otherwise report success. */
101b5a36b1eSLorenz Bauer 	if (err != -ENOSPC)
1021cf1cae9SAlexei Starovoitov 		err = 0;
1031cf1cae9SAlexei Starovoitov out:
104e950e843SMatt Mullins 	trace_bpf_test_finish(&err);
1051cf1cae9SAlexei Starovoitov 	return err;
1061cf1cae9SAlexei Starovoitov }
1071cf1cae9SAlexei Starovoitov 
/* Allocate a zeroed buffer of @size + @headroom + @tailroom bytes and copy
 * the user-supplied test packet (kattr->test.data_in) in at offset
 * @headroom.  The packet must be at least an Ethernet header and must fit
 * in a single page together with the requested head/tail room.  Returns
 * the buffer (ownership passes to the caller, freed with kfree()) or an
 * ERR_PTR on failure.
 */
1081cf1cae9SAlexei Starovoitov static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
1091cf1cae9SAlexei Starovoitov 			   u32 headroom, u32 tailroom)
1101cf1cae9SAlexei Starovoitov {
1111cf1cae9SAlexei Starovoitov 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1121cf1cae9SAlexei Starovoitov 	void *data;
1131cf1cae9SAlexei Starovoitov 
1141cf1cae9SAlexei Starovoitov 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
1151cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EINVAL);
1161cf1cae9SAlexei Starovoitov 
1171cf1cae9SAlexei Starovoitov 	data = kzalloc(size + headroom + tailroom, GFP_USER);
1181cf1cae9SAlexei Starovoitov 	if (!data)
1191cf1cae9SAlexei Starovoitov 		return ERR_PTR(-ENOMEM);
1201cf1cae9SAlexei Starovoitov 
1211cf1cae9SAlexei Starovoitov 	if (copy_from_user(data + headroom, data_in, size)) {
1221cf1cae9SAlexei Starovoitov 		kfree(data);
1231cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EFAULT);
1241cf1cae9SAlexei Starovoitov 	}
1251cf1cae9SAlexei Starovoitov 	return data;
1261cf1cae9SAlexei Starovoitov }
1271cf1cae9SAlexei Starovoitov 
/* Copy in the optional user-supplied program context (kattr->test.ctx_in).
 * Returns NULL when the caller provided neither ctx_in nor ctx_out, a
 * kzalloc'd buffer of @max_size bytes otherwise (owned by the caller,
 * freed with kfree()), or an ERR_PTR on failure.  If the user's context
 * is larger than @max_size, the excess bytes must be zero
 * (bpf_check_uarg_tail_zero), so newer user space can pass an extended
 * struct to an older kernel as long as the extra fields are unused.
 */
128b0b9395dSStanislav Fomichev static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
129b0b9395dSStanislav Fomichev {
130b0b9395dSStanislav Fomichev 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
131b0b9395dSStanislav Fomichev 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
132b0b9395dSStanislav Fomichev 	u32 size = kattr->test.ctx_size_in;
133b0b9395dSStanislav Fomichev 	void *data;
134b0b9395dSStanislav Fomichev 	int err;
135b0b9395dSStanislav Fomichev 
136b0b9395dSStanislav Fomichev 	if (!data_in && !data_out)
137b0b9395dSStanislav Fomichev 		return NULL;
138b0b9395dSStanislav Fomichev 
	/* Always allocate max_size so unset trailing fields read as zero. */
139b0b9395dSStanislav Fomichev 	data = kzalloc(max_size, GFP_USER);
140b0b9395dSStanislav Fomichev 	if (!data)
141b0b9395dSStanislav Fomichev 		return ERR_PTR(-ENOMEM);
142b0b9395dSStanislav Fomichev 
143b0b9395dSStanislav Fomichev 	if (data_in) {
144b0b9395dSStanislav Fomichev 		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
145b0b9395dSStanislav Fomichev 		if (err) {
146b0b9395dSStanislav Fomichev 			kfree(data);
147b0b9395dSStanislav Fomichev 			return ERR_PTR(err);
148b0b9395dSStanislav Fomichev 		}
149b0b9395dSStanislav Fomichev 
150b0b9395dSStanislav Fomichev 		size = min_t(u32, max_size, size);
151b0b9395dSStanislav Fomichev 		if (copy_from_user(data, data_in, size)) {
152b0b9395dSStanislav Fomichev 			kfree(data);
153b0b9395dSStanislav Fomichev 			return ERR_PTR(-EFAULT);
154b0b9395dSStanislav Fomichev 		}
155b0b9395dSStanislav Fomichev 	}
156b0b9395dSStanislav Fomichev 	return data;
157b0b9395dSStanislav Fomichev }
158b0b9395dSStanislav Fomichev 
/* Copy the (possibly modified) context back out to kattr->test.ctx_out.
 * A NULL @data or absent ctx_out is not an error — there is simply nothing
 * to copy.  Returns 0 on success, -ENOSPC if the user's ctx_size_out was
 * smaller than @size (truncated copy, full size still reported), or
 * -EFAULT on a failed user copy.
 */
159b0b9395dSStanislav Fomichev static int bpf_ctx_finish(const union bpf_attr *kattr,
160b0b9395dSStanislav Fomichev 			  union bpf_attr __user *uattr, const void *data,
161b0b9395dSStanislav Fomichev 			  u32 size)
162b0b9395dSStanislav Fomichev {
163b0b9395dSStanislav Fomichev 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
164b0b9395dSStanislav Fomichev 	int err = -EFAULT;
165b0b9395dSStanislav Fomichev 	u32 copy_size = size;
166b0b9395dSStanislav Fomichev 
167b0b9395dSStanislav Fomichev 	if (!data || !data_out)
168b0b9395dSStanislav Fomichev 		return 0;
169b0b9395dSStanislav Fomichev 
170b0b9395dSStanislav Fomichev 	if (copy_size > kattr->test.ctx_size_out) {
171b0b9395dSStanislav Fomichev 		copy_size = kattr->test.ctx_size_out;
172b0b9395dSStanislav Fomichev 		err = -ENOSPC;
173b0b9395dSStanislav Fomichev 	}
174b0b9395dSStanislav Fomichev 
175b0b9395dSStanislav Fomichev 	if (copy_to_user(data_out, data, copy_size))
176b0b9395dSStanislav Fomichev 		goto out;
177b0b9395dSStanislav Fomichev 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
178b0b9395dSStanislav Fomichev 		goto out;
	/* Preserve -ENOSPC from the clamp above; otherwise report success. */
179b0b9395dSStanislav Fomichev 	if (err != -ENOSPC)
180b0b9395dSStanislav Fomichev 		err = 0;
181b0b9395dSStanislav Fomichev out:
182b0b9395dSStanislav Fomichev 	return err;
183b0b9395dSStanislav Fomichev }
184b0b9395dSStanislav Fomichev 
185b0b9395dSStanislav Fomichev /**
186b0b9395dSStanislav Fomichev  * range_is_zero - test whether buffer is initialized
187b0b9395dSStanislav Fomichev  * @buf: buffer to check
188b0b9395dSStanislav Fomichev  * @from: check from this position
189b0b9395dSStanislav Fomichev  * @to: check up until (excluding) this position
190b0b9395dSStanislav Fomichev  *
191b0b9395dSStanislav Fomichev  * This function returns true if there is no non-zero byte
192b0b9395dSStanislav Fomichev  * in the buf in the range [from,to), i.e. the range is all zeroes.
193b0b9395dSStanislav Fomichev  */
194b0b9395dSStanislav Fomichev static inline bool range_is_zero(void *buf, size_t from, size_t to)
195b0b9395dSStanislav Fomichev {
196b0b9395dSStanislav Fomichev 	return !memchr_inv((u8 *)buf + from, 0, to - from);
197b0b9395dSStanislav Fomichev }
198b0b9395dSStanislav Fomichev 
/* Apply a user-supplied __sk_buff context to the real skb used for the
 * test run.  Only the writable fields (priority and cb[]) may be non-zero;
 * every other byte of the struct must be zero, enforced by checking the
 * gaps around those fields by offset.  A NULL @__skb (no context given)
 * is fine.  Returns 0 on success, -EINVAL if a read-only field was set.
 */
199b0b9395dSStanislav Fomichev static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
200b0b9395dSStanislav Fomichev {
201b0b9395dSStanislav Fomichev 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
202b0b9395dSStanislav Fomichev 
203b0b9395dSStanislav Fomichev 	if (!__skb)
204b0b9395dSStanislav Fomichev 		return 0;
205b0b9395dSStanislav Fomichev 
206b0b9395dSStanislav Fomichev 	/* make sure the fields we don't use are zeroed */
207b0b9395dSStanislav Fomichev 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
208b0b9395dSStanislav Fomichev 		return -EINVAL;
209b0b9395dSStanislav Fomichev 
210b0b9395dSStanislav Fomichev 	/* priority is allowed */
211b0b9395dSStanislav Fomichev 
212b0b9395dSStanislav Fomichev 	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
213b0b9395dSStanislav Fomichev 			   FIELD_SIZEOF(struct __sk_buff, priority),
214b0b9395dSStanislav Fomichev 			   offsetof(struct __sk_buff, cb)))
215b0b9395dSStanislav Fomichev 		return -EINVAL;
216b0b9395dSStanislav Fomichev 
217b0b9395dSStanislav Fomichev 	/* cb is allowed */
218b0b9395dSStanislav Fomichev 
219b0b9395dSStanislav Fomichev 	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
220b0b9395dSStanislav Fomichev 			   FIELD_SIZEOF(struct __sk_buff, cb),
221b0b9395dSStanislav Fomichev 			   sizeof(struct __sk_buff)))
222b0b9395dSStanislav Fomichev 		return -EINVAL;
223b0b9395dSStanislav Fomichev 
	/* cb[] lands in the qdisc private area of the real skb's cb. */
224b0b9395dSStanislav Fomichev 	skb->priority = __skb->priority;
225b0b9395dSStanislav Fomichev 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
226b0b9395dSStanislav Fomichev 
227b0b9395dSStanislav Fomichev 	return 0;
228b0b9395dSStanislav Fomichev }
229b0b9395dSStanislav Fomichev 
230b0b9395dSStanislav Fomichev static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
231b0b9395dSStanislav Fomichev {
232b0b9395dSStanislav Fomichev 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
233b0b9395dSStanislav Fomichev 
234b0b9395dSStanislav Fomichev 	if (!__skb)
235b0b9395dSStanislav Fomichev 		return;
236b0b9395dSStanislav Fomichev 
237b0b9395dSStanislav Fomichev 	__skb->priority = skb->priority;
238b0b9395dSStanislav Fomichev 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
239b0b9395dSStanislav Fomichev }
240b0b9395dSStanislav Fomichev 
/* BPF_PROG_TEST_RUN handler for skb-based program types.  Builds a fake
 * skb (with a dummy socket) from the user's packet data and optional
 * __sk_buff context, runs the program @repeat times via bpf_test_run(),
 * and copies the resulting packet, context, retval and duration back to
 * user space.  Returns 0 or a negative errno.
 */
2411cf1cae9SAlexei Starovoitov int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
2421cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
2431cf1cae9SAlexei Starovoitov {
2441cf1cae9SAlexei Starovoitov 	bool is_l2 = false, is_direct_pkt_access = false;
2451cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
2461cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
247b0b9395dSStanislav Fomichev 	struct __sk_buff *ctx = NULL;
2481cf1cae9SAlexei Starovoitov 	u32 retval, duration;
2496e6fddc7SDaniel Borkmann 	int hh_len = ETH_HLEN;
2501cf1cae9SAlexei Starovoitov 	struct sk_buff *skb;
2512cb494a3SSong Liu 	struct sock *sk;
2521cf1cae9SAlexei Starovoitov 	void *data;
2531cf1cae9SAlexei Starovoitov 	int ret;
2541cf1cae9SAlexei Starovoitov 
	/* Head/tail room sized so build_skb() can use the buffer directly. */
255586f8525SDavid Miller 	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
2561cf1cae9SAlexei Starovoitov 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2571cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
2581cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
2591cf1cae9SAlexei Starovoitov 
260b0b9395dSStanislav Fomichev 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
261b0b9395dSStanislav Fomichev 	if (IS_ERR(ctx)) {
262b0b9395dSStanislav Fomichev 		kfree(data);
263b0b9395dSStanislav Fomichev 		return PTR_ERR(ctx);
264b0b9395dSStanislav Fomichev 	}
265b0b9395dSStanislav Fomichev 
	/* L2 (tc) programs see the MAC header; LWT types only get direct
	 * packet access.  Other program types get neither.
	 */
2661cf1cae9SAlexei Starovoitov 	switch (prog->type) {
2671cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_CLS:
2681cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_ACT:
2691cf1cae9SAlexei Starovoitov 		is_l2 = true;
2701cf1cae9SAlexei Starovoitov 		/* fall through */
2711cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_IN:
2721cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_OUT:
2731cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_XMIT:
2741cf1cae9SAlexei Starovoitov 		is_direct_pkt_access = true;
2751cf1cae9SAlexei Starovoitov 		break;
2761cf1cae9SAlexei Starovoitov 	default:
2771cf1cae9SAlexei Starovoitov 		break;
2781cf1cae9SAlexei Starovoitov 	}
2791cf1cae9SAlexei Starovoitov 
	/* Dummy socket in the caller's netns so sk-based helpers work. */
2802cb494a3SSong Liu 	sk = kzalloc(sizeof(struct sock), GFP_USER);
2812cb494a3SSong Liu 	if (!sk) {
2821cf1cae9SAlexei Starovoitov 		kfree(data);
283b0b9395dSStanislav Fomichev 		kfree(ctx);
2841cf1cae9SAlexei Starovoitov 		return -ENOMEM;
2851cf1cae9SAlexei Starovoitov 	}
2862cb494a3SSong Liu 	sock_net_set(sk, current->nsproxy->net_ns);
2872cb494a3SSong Liu 	sock_init_data(NULL, sk);
2882cb494a3SSong Liu 
	/* build_skb() takes ownership of data on success; on failure we
	 * still own it and must kfree() it ourselves.
	 */
2892cb494a3SSong Liu 	skb = build_skb(data, 0);
2902cb494a3SSong Liu 	if (!skb) {
2912cb494a3SSong Liu 		kfree(data);
292b0b9395dSStanislav Fomichev 		kfree(ctx);
2932cb494a3SSong Liu 		kfree(sk);
2942cb494a3SSong Liu 		return -ENOMEM;
2952cb494a3SSong Liu 	}
2962cb494a3SSong Liu 	skb->sk = sk;
2971cf1cae9SAlexei Starovoitov 
	/* Shape the skb as if it arrived on the loopback device. */
298586f8525SDavid Miller 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2991cf1cae9SAlexei Starovoitov 	__skb_put(skb, size);
3001cf1cae9SAlexei Starovoitov 	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
3011cf1cae9SAlexei Starovoitov 	skb_reset_network_header(skb);
3021cf1cae9SAlexei Starovoitov 
	/* eth_type_trans() pulled the MAC header; push it back for L2 progs. */
3031cf1cae9SAlexei Starovoitov 	if (is_l2)
3046e6fddc7SDaniel Borkmann 		__skb_push(skb, hh_len);
3051cf1cae9SAlexei Starovoitov 	if (is_direct_pkt_access)
3066aaae2b6SDaniel Borkmann 		bpf_compute_data_pointers(skb);
307b0b9395dSStanislav Fomichev 	ret = convert___skb_to_skb(skb, ctx);
308b0b9395dSStanislav Fomichev 	if (ret)
309b0b9395dSStanislav Fomichev 		goto out;
310dcb40590SRoman Gushchin 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
311b0b9395dSStanislav Fomichev 	if (ret)
312b0b9395dSStanislav Fomichev 		goto out;
	/* Non-L2 progs never saw the MAC header; re-add a zeroed one (growing
	 * headroom if the program consumed it) so the returned packet always
	 * starts at the Ethernet header.
	 */
3136e6fddc7SDaniel Borkmann 	if (!is_l2) {
3146e6fddc7SDaniel Borkmann 		if (skb_headroom(skb) < hh_len) {
3156e6fddc7SDaniel Borkmann 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
3166e6fddc7SDaniel Borkmann 
3176e6fddc7SDaniel Borkmann 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
318b0b9395dSStanislav Fomichev 				ret = -ENOMEM;
319b0b9395dSStanislav Fomichev 				goto out;
3206e6fddc7SDaniel Borkmann 			}
3216e6fddc7SDaniel Borkmann 		}
3226e6fddc7SDaniel Borkmann 		memset(__skb_push(skb, hh_len), 0, hh_len);
3236e6fddc7SDaniel Borkmann 	}
324b0b9395dSStanislav Fomichev 	convert_skb_to___skb(skb, ctx);
3256e6fddc7SDaniel Borkmann 
3261cf1cae9SAlexei Starovoitov 	size = skb->len;
3271cf1cae9SAlexei Starovoitov 	/* bpf program can never convert linear skb to non-linear */
3281cf1cae9SAlexei Starovoitov 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
3291cf1cae9SAlexei Starovoitov 		size = skb_headlen(skb);
33078e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
331b0b9395dSStanislav Fomichev 	if (!ret)
332b0b9395dSStanislav Fomichev 		ret = bpf_ctx_finish(kattr, uattr, ctx,
333b0b9395dSStanislav Fomichev 				     sizeof(struct __sk_buff));
334b0b9395dSStanislav Fomichev out:
	/* kfree_skb() also frees the packet data owned by the skb. */
3351cf1cae9SAlexei Starovoitov 	kfree_skb(skb);
3366ac99e8fSMartin KaFai Lau 	bpf_sk_storage_free(sk);
3372cb494a3SSong Liu 	kfree(sk);
338b0b9395dSStanislav Fomichev 	kfree(ctx);
3391cf1cae9SAlexei Starovoitov 	return ret;
3401cf1cae9SAlexei Starovoitov }
3411cf1cae9SAlexei Starovoitov 
/* BPF_PROG_TEST_RUN handler for XDP programs.  Builds an xdp_buff over the
 * user's packet data (with XDP headroom), attaches the loopback device's
 * rx queue info, runs the program, and copies the possibly-adjusted packet
 * plus retval/duration back to user space.  XDP test runs do not support a
 * user context (ctx_in/ctx_out rejected).  Returns 0 or a negative errno.
 */
3421cf1cae9SAlexei Starovoitov int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
3431cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
3441cf1cae9SAlexei Starovoitov {
3451cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
3461cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
34765073a67SDaniel Borkmann 	struct netdev_rx_queue *rxqueue;
3481cf1cae9SAlexei Starovoitov 	struct xdp_buff xdp = {};
3491cf1cae9SAlexei Starovoitov 	u32 retval, duration;
3501cf1cae9SAlexei Starovoitov 	void *data;
3511cf1cae9SAlexei Starovoitov 	int ret;
3521cf1cae9SAlexei Starovoitov 
353947e8b59SStanislav Fomichev 	if (kattr->test.ctx_in || kattr->test.ctx_out)
354947e8b59SStanislav Fomichev 		return -EINVAL;
355947e8b59SStanislav Fomichev 
356586f8525SDavid Miller 	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
3571cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
3581cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
3591cf1cae9SAlexei Starovoitov 
	/* Packet starts after the XDP headroom; no metadata initially. */
3601cf1cae9SAlexei Starovoitov 	xdp.data_hard_start = data;
361586f8525SDavid Miller 	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
362de8f3a83SDaniel Borkmann 	xdp.data_meta = xdp.data;
3631cf1cae9SAlexei Starovoitov 	xdp.data_end = xdp.data + size;
3641cf1cae9SAlexei Starovoitov 
36565073a67SDaniel Borkmann 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
36665073a67SDaniel Borkmann 	xdp.rxq = &rxqueue->xdp_rxq;
36765073a67SDaniel Borkmann 
368dcb40590SRoman Gushchin 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
369dcb40590SRoman Gushchin 	if (ret)
370dcb40590SRoman Gushchin 		goto out;
	/* If the program moved data/data_end (head/tail adjust), report the
	 * new packet length instead of the input size.
	 */
371587b80ccSNikita V. Shirokov 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
372587b80ccSNikita V. Shirokov 	    xdp.data_end != xdp.data + size)
3731cf1cae9SAlexei Starovoitov 		size = xdp.data_end - xdp.data;
37478e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
375dcb40590SRoman Gushchin out:
3761cf1cae9SAlexei Starovoitov 	kfree(data);
3771cf1cae9SAlexei Starovoitov 	return ret;
3781cf1cae9SAlexei Starovoitov }
379b7a1848eSStanislav Fomichev 
380*b2ca4e1cSStanislav Fomichev static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
381*b2ca4e1cSStanislav Fomichev {
382*b2ca4e1cSStanislav Fomichev 	/* make sure the fields we don't use are zeroed */
383*b2ca4e1cSStanislav Fomichev 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
384*b2ca4e1cSStanislav Fomichev 		return -EINVAL;
385*b2ca4e1cSStanislav Fomichev 
386*b2ca4e1cSStanislav Fomichev 	/* flags is allowed */
387*b2ca4e1cSStanislav Fomichev 
388*b2ca4e1cSStanislav Fomichev 	if (!range_is_zero(ctx, offsetof(struct bpf_flow_keys, flags) +
389*b2ca4e1cSStanislav Fomichev 			   FIELD_SIZEOF(struct bpf_flow_keys, flags),
390*b2ca4e1cSStanislav Fomichev 			   sizeof(struct bpf_flow_keys)))
391*b2ca4e1cSStanislav Fomichev 		return -EINVAL;
392*b2ca4e1cSStanislav Fomichev 
393*b2ca4e1cSStanislav Fomichev 	return 0;
394*b2ca4e1cSStanislav Fomichev }
395*b2ca4e1cSStanislav Fomichev 
/* BPF_PROG_TEST_RUN handler for flow dissector programs.  Dissects the
 * user's packet @repeat times (with an optional bpf_flow_keys context that
 * may only carry dissection flags) and returns the resulting flow_keys,
 * the dissector's return code and the mean runtime.  The timing loop here
 * mirrors bpf_test_run(): RCU + preemption disabled, interruptible by
 * signals, rescheduling excluded from the measured time.  Returns 0 or a
 * negative errno.
 */
396b7a1848eSStanislav Fomichev int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
397b7a1848eSStanislav Fomichev 				     const union bpf_attr *kattr,
398b7a1848eSStanislav Fomichev 				     union bpf_attr __user *uattr)
399b7a1848eSStanislav Fomichev {
400b7a1848eSStanislav Fomichev 	u32 size = kattr->test.data_size_in;
4017b8a1304SStanislav Fomichev 	struct bpf_flow_dissector ctx = {};
402b7a1848eSStanislav Fomichev 	u32 repeat = kattr->test.repeat;
403*b2ca4e1cSStanislav Fomichev 	struct bpf_flow_keys *user_ctx;
404b7a1848eSStanislav Fomichev 	struct bpf_flow_keys flow_keys;
405b7a1848eSStanislav Fomichev 	u64 time_start, time_spent = 0;
4067b8a1304SStanislav Fomichev 	const struct ethhdr *eth;
407*b2ca4e1cSStanislav Fomichev 	unsigned int flags = 0;
408b7a1848eSStanislav Fomichev 	u32 retval, duration;
409b7a1848eSStanislav Fomichev 	void *data;
410b7a1848eSStanislav Fomichev 	int ret;
411b7a1848eSStanislav Fomichev 	u32 i;
412b7a1848eSStanislav Fomichev 
413b7a1848eSStanislav Fomichev 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
414b7a1848eSStanislav Fomichev 		return -EINVAL;
415b7a1848eSStanislav Fomichev 
	/* Dissection starts after the Ethernet header, so require one. */
4167b8a1304SStanislav Fomichev 	if (size < ETH_HLEN)
4177b8a1304SStanislav Fomichev 		return -EINVAL;
4187b8a1304SStanislav Fomichev 
4197b8a1304SStanislav Fomichev 	data = bpf_test_init(kattr, size, 0, 0);
420b7a1848eSStanislav Fomichev 	if (IS_ERR(data))
421b7a1848eSStanislav Fomichev 		return PTR_ERR(data);
422b7a1848eSStanislav Fomichev 
4237b8a1304SStanislav Fomichev 	eth = (struct ethhdr *)data;
424b7a1848eSStanislav Fomichev 
425b7a1848eSStanislav Fomichev 	if (!repeat)
426b7a1848eSStanislav Fomichev 		repeat = 1;
427b7a1848eSStanislav Fomichev 
	/* Optional context: only ->flags may be set, and it feeds the
	 * dissector's flags argument.
	 */
428*b2ca4e1cSStanislav Fomichev 	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
429*b2ca4e1cSStanislav Fomichev 	if (IS_ERR(user_ctx)) {
430*b2ca4e1cSStanislav Fomichev 		kfree(data);
431*b2ca4e1cSStanislav Fomichev 		return PTR_ERR(user_ctx);
432*b2ca4e1cSStanislav Fomichev 	}
433*b2ca4e1cSStanislav Fomichev 	if (user_ctx) {
434*b2ca4e1cSStanislav Fomichev 		ret = verify_user_bpf_flow_keys(user_ctx);
435*b2ca4e1cSStanislav Fomichev 		if (ret)
436*b2ca4e1cSStanislav Fomichev 			goto out;
437*b2ca4e1cSStanislav Fomichev 		flags = user_ctx->flags;
438*b2ca4e1cSStanislav Fomichev 	}
439*b2ca4e1cSStanislav Fomichev 
4407b8a1304SStanislav Fomichev 	ctx.flow_keys = &flow_keys;
4417b8a1304SStanislav Fomichev 	ctx.data = data;
4427b8a1304SStanislav Fomichev 	ctx.data_end = (__u8 *)data + size;
4437b8a1304SStanislav Fomichev 
	/* Same discipline as bpf_test_run(): RCU + preemption off while the
	 * program runs, with interruption and voluntary rescheduling points.
	 */
444a439184dSStanislav Fomichev 	rcu_read_lock();
445a439184dSStanislav Fomichev 	preempt_disable();
446b7a1848eSStanislav Fomichev 	time_start = ktime_get_ns();
447b7a1848eSStanislav Fomichev 	for (i = 0; i < repeat; i++) {
4487b8a1304SStanislav Fomichev 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
449*b2ca4e1cSStanislav Fomichev 					  size, flags);
4507b8a1304SStanislav Fomichev 
451a439184dSStanislav Fomichev 		if (signal_pending(current)) {
452b7a1848eSStanislav Fomichev 			preempt_enable();
453a439184dSStanislav Fomichev 			rcu_read_unlock();
454a439184dSStanislav Fomichev 
455a439184dSStanislav Fomichev 			ret = -EINTR;
456a439184dSStanislav Fomichev 			goto out;
457a439184dSStanislav Fomichev 		}
458b7a1848eSStanislav Fomichev 
		/* Pause timing and drop rcu/preempt across cond_resched(). */
459b7a1848eSStanislav Fomichev 		if (need_resched()) {
460b7a1848eSStanislav Fomichev 			time_spent += ktime_get_ns() - time_start;
461a439184dSStanislav Fomichev 			preempt_enable();
462a439184dSStanislav Fomichev 			rcu_read_unlock();
463a439184dSStanislav Fomichev 
464b7a1848eSStanislav Fomichev 			cond_resched();
465a439184dSStanislav Fomichev 
466a439184dSStanislav Fomichev 			rcu_read_lock();
467a439184dSStanislav Fomichev 			preempt_disable();
468b7a1848eSStanislav Fomichev 			time_start = ktime_get_ns();
469b7a1848eSStanislav Fomichev 		}
470b7a1848eSStanislav Fomichev 	}
471b7a1848eSStanislav Fomichev 	time_spent += ktime_get_ns() - time_start;
472a439184dSStanislav Fomichev 	preempt_enable();
473a439184dSStanislav Fomichev 	rcu_read_unlock();
474a439184dSStanislav Fomichev 
	/* Mean per-run duration, clamped to fit in a u32. */
475b7a1848eSStanislav Fomichev 	do_div(time_spent, repeat);
476b7a1848eSStanislav Fomichev 	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
477b7a1848eSStanislav Fomichev 
478b7a1848eSStanislav Fomichev 	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
479b7a1848eSStanislav Fomichev 			      retval, duration);
480*b2ca4e1cSStanislav Fomichev 	if (!ret)
481*b2ca4e1cSStanislav Fomichev 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
482*b2ca4e1cSStanislav Fomichev 				     sizeof(struct bpf_flow_keys));
483b7a1848eSStanislav Fomichev 
484a439184dSStanislav Fomichev out:
485*b2ca4e1cSStanislav Fomichev 	kfree(user_ctx);
4867b8a1304SStanislav Fomichev 	kfree(data);
487b7a1848eSStanislav Fomichev 	return ret;
488b7a1848eSStanislav Fomichev }
489