xref: /openbmc/linux/net/bpf/test_run.c (revision 7b8a1304323b35bbf060e0d29691031056836b73)
11cf1cae9SAlexei Starovoitov /* Copyright (c) 2017 Facebook
21cf1cae9SAlexei Starovoitov  *
31cf1cae9SAlexei Starovoitov  * This program is free software; you can redistribute it and/or
41cf1cae9SAlexei Starovoitov  * modify it under the terms of version 2 of the GNU General Public
51cf1cae9SAlexei Starovoitov  * License as published by the Free Software Foundation.
61cf1cae9SAlexei Starovoitov  */
71cf1cae9SAlexei Starovoitov #include <linux/bpf.h>
81cf1cae9SAlexei Starovoitov #include <linux/slab.h>
91cf1cae9SAlexei Starovoitov #include <linux/vmalloc.h>
101cf1cae9SAlexei Starovoitov #include <linux/etherdevice.h>
111cf1cae9SAlexei Starovoitov #include <linux/filter.h>
121cf1cae9SAlexei Starovoitov #include <linux/sched/signal.h>
132cb494a3SSong Liu #include <net/sock.h>
142cb494a3SSong Liu #include <net/tcp.h>
151cf1cae9SAlexei Starovoitov 
16df1a2cb7SStanislav Fomichev static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
17df1a2cb7SStanislav Fomichev 			u32 *retval, u32 *time)
181cf1cae9SAlexei Starovoitov {
1971b91a50SBo YU 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
208bad74f9SRoman Gushchin 	enum bpf_cgroup_storage_type stype;
211cf1cae9SAlexei Starovoitov 	u64 time_start, time_spent = 0;
22df1a2cb7SStanislav Fomichev 	int ret = 0;
23dcb40590SRoman Gushchin 	u32 i;
241cf1cae9SAlexei Starovoitov 
258bad74f9SRoman Gushchin 	for_each_cgroup_storage_type(stype) {
268bad74f9SRoman Gushchin 		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
278bad74f9SRoman Gushchin 		if (IS_ERR(storage[stype])) {
288bad74f9SRoman Gushchin 			storage[stype] = NULL;
298bad74f9SRoman Gushchin 			for_each_cgroup_storage_type(stype)
308bad74f9SRoman Gushchin 				bpf_cgroup_storage_free(storage[stype]);
318bad74f9SRoman Gushchin 			return -ENOMEM;
328bad74f9SRoman Gushchin 		}
338bad74f9SRoman Gushchin 	}
34f42ee093SRoman Gushchin 
351cf1cae9SAlexei Starovoitov 	if (!repeat)
361cf1cae9SAlexei Starovoitov 		repeat = 1;
37df1a2cb7SStanislav Fomichev 
38df1a2cb7SStanislav Fomichev 	rcu_read_lock();
39df1a2cb7SStanislav Fomichev 	preempt_disable();
401cf1cae9SAlexei Starovoitov 	time_start = ktime_get_ns();
411cf1cae9SAlexei Starovoitov 	for (i = 0; i < repeat; i++) {
42df1a2cb7SStanislav Fomichev 		bpf_cgroup_storage_set(storage);
43df1a2cb7SStanislav Fomichev 		*retval = BPF_PROG_RUN(prog, ctx);
44df1a2cb7SStanislav Fomichev 
45df1a2cb7SStanislav Fomichev 		if (signal_pending(current)) {
46df1a2cb7SStanislav Fomichev 			ret = -EINTR;
471cf1cae9SAlexei Starovoitov 			break;
48df1a2cb7SStanislav Fomichev 		}
49df1a2cb7SStanislav Fomichev 
50df1a2cb7SStanislav Fomichev 		if (need_resched()) {
511cf1cae9SAlexei Starovoitov 			time_spent += ktime_get_ns() - time_start;
52df1a2cb7SStanislav Fomichev 			preempt_enable();
53df1a2cb7SStanislav Fomichev 			rcu_read_unlock();
54df1a2cb7SStanislav Fomichev 
551cf1cae9SAlexei Starovoitov 			cond_resched();
56df1a2cb7SStanislav Fomichev 
57df1a2cb7SStanislav Fomichev 			rcu_read_lock();
58df1a2cb7SStanislav Fomichev 			preempt_disable();
591cf1cae9SAlexei Starovoitov 			time_start = ktime_get_ns();
601cf1cae9SAlexei Starovoitov 		}
611cf1cae9SAlexei Starovoitov 	}
621cf1cae9SAlexei Starovoitov 	time_spent += ktime_get_ns() - time_start;
63df1a2cb7SStanislav Fomichev 	preempt_enable();
64df1a2cb7SStanislav Fomichev 	rcu_read_unlock();
65df1a2cb7SStanislav Fomichev 
661cf1cae9SAlexei Starovoitov 	do_div(time_spent, repeat);
671cf1cae9SAlexei Starovoitov 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
681cf1cae9SAlexei Starovoitov 
698bad74f9SRoman Gushchin 	for_each_cgroup_storage_type(stype)
708bad74f9SRoman Gushchin 		bpf_cgroup_storage_free(storage[stype]);
71f42ee093SRoman Gushchin 
72df1a2cb7SStanislav Fomichev 	return ret;
731cf1cae9SAlexei Starovoitov }
741cf1cae9SAlexei Starovoitov 
7578e52272SDavid Miller static int bpf_test_finish(const union bpf_attr *kattr,
7678e52272SDavid Miller 			   union bpf_attr __user *uattr, const void *data,
771cf1cae9SAlexei Starovoitov 			   u32 size, u32 retval, u32 duration)
781cf1cae9SAlexei Starovoitov {
7978e52272SDavid Miller 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
801cf1cae9SAlexei Starovoitov 	int err = -EFAULT;
81b5a36b1eSLorenz Bauer 	u32 copy_size = size;
821cf1cae9SAlexei Starovoitov 
83b5a36b1eSLorenz Bauer 	/* Clamp copy if the user has provided a size hint, but copy the full
84b5a36b1eSLorenz Bauer 	 * buffer if not to retain old behaviour.
85b5a36b1eSLorenz Bauer 	 */
86b5a36b1eSLorenz Bauer 	if (kattr->test.data_size_out &&
87b5a36b1eSLorenz Bauer 	    copy_size > kattr->test.data_size_out) {
88b5a36b1eSLorenz Bauer 		copy_size = kattr->test.data_size_out;
89b5a36b1eSLorenz Bauer 		err = -ENOSPC;
90b5a36b1eSLorenz Bauer 	}
91b5a36b1eSLorenz Bauer 
92b5a36b1eSLorenz Bauer 	if (data_out && copy_to_user(data_out, data, copy_size))
931cf1cae9SAlexei Starovoitov 		goto out;
941cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
951cf1cae9SAlexei Starovoitov 		goto out;
961cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
971cf1cae9SAlexei Starovoitov 		goto out;
981cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
991cf1cae9SAlexei Starovoitov 		goto out;
100b5a36b1eSLorenz Bauer 	if (err != -ENOSPC)
1011cf1cae9SAlexei Starovoitov 		err = 0;
1021cf1cae9SAlexei Starovoitov out:
1031cf1cae9SAlexei Starovoitov 	return err;
1041cf1cae9SAlexei Starovoitov }
1051cf1cae9SAlexei Starovoitov 
1061cf1cae9SAlexei Starovoitov static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
1071cf1cae9SAlexei Starovoitov 			   u32 headroom, u32 tailroom)
1081cf1cae9SAlexei Starovoitov {
1091cf1cae9SAlexei Starovoitov 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1101cf1cae9SAlexei Starovoitov 	void *data;
1111cf1cae9SAlexei Starovoitov 
1121cf1cae9SAlexei Starovoitov 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
1131cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EINVAL);
1141cf1cae9SAlexei Starovoitov 
1151cf1cae9SAlexei Starovoitov 	data = kzalloc(size + headroom + tailroom, GFP_USER);
1161cf1cae9SAlexei Starovoitov 	if (!data)
1171cf1cae9SAlexei Starovoitov 		return ERR_PTR(-ENOMEM);
1181cf1cae9SAlexei Starovoitov 
1191cf1cae9SAlexei Starovoitov 	if (copy_from_user(data + headroom, data_in, size)) {
1201cf1cae9SAlexei Starovoitov 		kfree(data);
1211cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EFAULT);
1221cf1cae9SAlexei Starovoitov 	}
1231cf1cae9SAlexei Starovoitov 	return data;
1241cf1cae9SAlexei Starovoitov }
1251cf1cae9SAlexei Starovoitov 
126b0b9395dSStanislav Fomichev static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
127b0b9395dSStanislav Fomichev {
128b0b9395dSStanislav Fomichev 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
129b0b9395dSStanislav Fomichev 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
130b0b9395dSStanislav Fomichev 	u32 size = kattr->test.ctx_size_in;
131b0b9395dSStanislav Fomichev 	void *data;
132b0b9395dSStanislav Fomichev 	int err;
133b0b9395dSStanislav Fomichev 
134b0b9395dSStanislav Fomichev 	if (!data_in && !data_out)
135b0b9395dSStanislav Fomichev 		return NULL;
136b0b9395dSStanislav Fomichev 
137b0b9395dSStanislav Fomichev 	data = kzalloc(max_size, GFP_USER);
138b0b9395dSStanislav Fomichev 	if (!data)
139b0b9395dSStanislav Fomichev 		return ERR_PTR(-ENOMEM);
140b0b9395dSStanislav Fomichev 
141b0b9395dSStanislav Fomichev 	if (data_in) {
142b0b9395dSStanislav Fomichev 		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
143b0b9395dSStanislav Fomichev 		if (err) {
144b0b9395dSStanislav Fomichev 			kfree(data);
145b0b9395dSStanislav Fomichev 			return ERR_PTR(err);
146b0b9395dSStanislav Fomichev 		}
147b0b9395dSStanislav Fomichev 
148b0b9395dSStanislav Fomichev 		size = min_t(u32, max_size, size);
149b0b9395dSStanislav Fomichev 		if (copy_from_user(data, data_in, size)) {
150b0b9395dSStanislav Fomichev 			kfree(data);
151b0b9395dSStanislav Fomichev 			return ERR_PTR(-EFAULT);
152b0b9395dSStanislav Fomichev 		}
153b0b9395dSStanislav Fomichev 	}
154b0b9395dSStanislav Fomichev 	return data;
155b0b9395dSStanislav Fomichev }
156b0b9395dSStanislav Fomichev 
157b0b9395dSStanislav Fomichev static int bpf_ctx_finish(const union bpf_attr *kattr,
158b0b9395dSStanislav Fomichev 			  union bpf_attr __user *uattr, const void *data,
159b0b9395dSStanislav Fomichev 			  u32 size)
160b0b9395dSStanislav Fomichev {
161b0b9395dSStanislav Fomichev 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
162b0b9395dSStanislav Fomichev 	int err = -EFAULT;
163b0b9395dSStanislav Fomichev 	u32 copy_size = size;
164b0b9395dSStanislav Fomichev 
165b0b9395dSStanislav Fomichev 	if (!data || !data_out)
166b0b9395dSStanislav Fomichev 		return 0;
167b0b9395dSStanislav Fomichev 
168b0b9395dSStanislav Fomichev 	if (copy_size > kattr->test.ctx_size_out) {
169b0b9395dSStanislav Fomichev 		copy_size = kattr->test.ctx_size_out;
170b0b9395dSStanislav Fomichev 		err = -ENOSPC;
171b0b9395dSStanislav Fomichev 	}
172b0b9395dSStanislav Fomichev 
173b0b9395dSStanislav Fomichev 	if (copy_to_user(data_out, data, copy_size))
174b0b9395dSStanislav Fomichev 		goto out;
175b0b9395dSStanislav Fomichev 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
176b0b9395dSStanislav Fomichev 		goto out;
177b0b9395dSStanislav Fomichev 	if (err != -ENOSPC)
178b0b9395dSStanislav Fomichev 		err = 0;
179b0b9395dSStanislav Fomichev out:
180b0b9395dSStanislav Fomichev 	return err;
181b0b9395dSStanislav Fomichev }
182b0b9395dSStanislav Fomichev 
183b0b9395dSStanislav Fomichev /**
184b0b9395dSStanislav Fomichev  * range_is_zero - test whether buffer is initialized
185b0b9395dSStanislav Fomichev  * @buf: buffer to check
186b0b9395dSStanislav Fomichev  * @from: check from this position
187b0b9395dSStanislav Fomichev  * @to: check up until (excluding) this position
188b0b9395dSStanislav Fomichev  *
189b0b9395dSStanislav Fomichev  * This function returns true if the there is a non-zero byte
190b0b9395dSStanislav Fomichev  * in the buf in the range [from,to).
191b0b9395dSStanislav Fomichev  */
192b0b9395dSStanislav Fomichev static inline bool range_is_zero(void *buf, size_t from, size_t to)
193b0b9395dSStanislav Fomichev {
194b0b9395dSStanislav Fomichev 	return !memchr_inv((u8 *)buf + from, 0, to - from);
195b0b9395dSStanislav Fomichev }
196b0b9395dSStanislav Fomichev 
197b0b9395dSStanislav Fomichev static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
198b0b9395dSStanislav Fomichev {
199b0b9395dSStanislav Fomichev 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
200b0b9395dSStanislav Fomichev 
201b0b9395dSStanislav Fomichev 	if (!__skb)
202b0b9395dSStanislav Fomichev 		return 0;
203b0b9395dSStanislav Fomichev 
204b0b9395dSStanislav Fomichev 	/* make sure the fields we don't use are zeroed */
205b0b9395dSStanislav Fomichev 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
206b0b9395dSStanislav Fomichev 		return -EINVAL;
207b0b9395dSStanislav Fomichev 
208b0b9395dSStanislav Fomichev 	/* priority is allowed */
209b0b9395dSStanislav Fomichev 
210b0b9395dSStanislav Fomichev 	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
211b0b9395dSStanislav Fomichev 			   FIELD_SIZEOF(struct __sk_buff, priority),
212b0b9395dSStanislav Fomichev 			   offsetof(struct __sk_buff, cb)))
213b0b9395dSStanislav Fomichev 		return -EINVAL;
214b0b9395dSStanislav Fomichev 
215b0b9395dSStanislav Fomichev 	/* cb is allowed */
216b0b9395dSStanislav Fomichev 
217b0b9395dSStanislav Fomichev 	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
218b0b9395dSStanislav Fomichev 			   FIELD_SIZEOF(struct __sk_buff, cb),
219b0b9395dSStanislav Fomichev 			   sizeof(struct __sk_buff)))
220b0b9395dSStanislav Fomichev 		return -EINVAL;
221b0b9395dSStanislav Fomichev 
222b0b9395dSStanislav Fomichev 	skb->priority = __skb->priority;
223b0b9395dSStanislav Fomichev 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
224b0b9395dSStanislav Fomichev 
225b0b9395dSStanislav Fomichev 	return 0;
226b0b9395dSStanislav Fomichev }
227b0b9395dSStanislav Fomichev 
228b0b9395dSStanislav Fomichev static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
229b0b9395dSStanislav Fomichev {
230b0b9395dSStanislav Fomichev 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
231b0b9395dSStanislav Fomichev 
232b0b9395dSStanislav Fomichev 	if (!__skb)
233b0b9395dSStanislav Fomichev 		return;
234b0b9395dSStanislav Fomichev 
235b0b9395dSStanislav Fomichev 	__skb->priority = skb->priority;
236b0b9395dSStanislav Fomichev 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
237b0b9395dSStanislav Fomichev }
238b0b9395dSStanislav Fomichev 
2391cf1cae9SAlexei Starovoitov int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
2401cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
2411cf1cae9SAlexei Starovoitov {
2421cf1cae9SAlexei Starovoitov 	bool is_l2 = false, is_direct_pkt_access = false;
2431cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
2441cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
245b0b9395dSStanislav Fomichev 	struct __sk_buff *ctx = NULL;
2461cf1cae9SAlexei Starovoitov 	u32 retval, duration;
2476e6fddc7SDaniel Borkmann 	int hh_len = ETH_HLEN;
2481cf1cae9SAlexei Starovoitov 	struct sk_buff *skb;
2492cb494a3SSong Liu 	struct sock *sk;
2501cf1cae9SAlexei Starovoitov 	void *data;
2511cf1cae9SAlexei Starovoitov 	int ret;
2521cf1cae9SAlexei Starovoitov 
253586f8525SDavid Miller 	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
2541cf1cae9SAlexei Starovoitov 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2551cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
2561cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
2571cf1cae9SAlexei Starovoitov 
258b0b9395dSStanislav Fomichev 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
259b0b9395dSStanislav Fomichev 	if (IS_ERR(ctx)) {
260b0b9395dSStanislav Fomichev 		kfree(data);
261b0b9395dSStanislav Fomichev 		return PTR_ERR(ctx);
262b0b9395dSStanislav Fomichev 	}
263b0b9395dSStanislav Fomichev 
2641cf1cae9SAlexei Starovoitov 	switch (prog->type) {
2651cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_CLS:
2661cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_ACT:
2671cf1cae9SAlexei Starovoitov 		is_l2 = true;
2681cf1cae9SAlexei Starovoitov 		/* fall through */
2691cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_IN:
2701cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_OUT:
2711cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_XMIT:
2721cf1cae9SAlexei Starovoitov 		is_direct_pkt_access = true;
2731cf1cae9SAlexei Starovoitov 		break;
2741cf1cae9SAlexei Starovoitov 	default:
2751cf1cae9SAlexei Starovoitov 		break;
2761cf1cae9SAlexei Starovoitov 	}
2771cf1cae9SAlexei Starovoitov 
2782cb494a3SSong Liu 	sk = kzalloc(sizeof(struct sock), GFP_USER);
2792cb494a3SSong Liu 	if (!sk) {
2801cf1cae9SAlexei Starovoitov 		kfree(data);
281b0b9395dSStanislav Fomichev 		kfree(ctx);
2821cf1cae9SAlexei Starovoitov 		return -ENOMEM;
2831cf1cae9SAlexei Starovoitov 	}
2842cb494a3SSong Liu 	sock_net_set(sk, current->nsproxy->net_ns);
2852cb494a3SSong Liu 	sock_init_data(NULL, sk);
2862cb494a3SSong Liu 
2872cb494a3SSong Liu 	skb = build_skb(data, 0);
2882cb494a3SSong Liu 	if (!skb) {
2892cb494a3SSong Liu 		kfree(data);
290b0b9395dSStanislav Fomichev 		kfree(ctx);
2912cb494a3SSong Liu 		kfree(sk);
2922cb494a3SSong Liu 		return -ENOMEM;
2932cb494a3SSong Liu 	}
2942cb494a3SSong Liu 	skb->sk = sk;
2951cf1cae9SAlexei Starovoitov 
296586f8525SDavid Miller 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2971cf1cae9SAlexei Starovoitov 	__skb_put(skb, size);
2981cf1cae9SAlexei Starovoitov 	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
2991cf1cae9SAlexei Starovoitov 	skb_reset_network_header(skb);
3001cf1cae9SAlexei Starovoitov 
3011cf1cae9SAlexei Starovoitov 	if (is_l2)
3026e6fddc7SDaniel Borkmann 		__skb_push(skb, hh_len);
3031cf1cae9SAlexei Starovoitov 	if (is_direct_pkt_access)
3046aaae2b6SDaniel Borkmann 		bpf_compute_data_pointers(skb);
305b0b9395dSStanislav Fomichev 	ret = convert___skb_to_skb(skb, ctx);
306b0b9395dSStanislav Fomichev 	if (ret)
307b0b9395dSStanislav Fomichev 		goto out;
308dcb40590SRoman Gushchin 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
309b0b9395dSStanislav Fomichev 	if (ret)
310b0b9395dSStanislav Fomichev 		goto out;
3116e6fddc7SDaniel Borkmann 	if (!is_l2) {
3126e6fddc7SDaniel Borkmann 		if (skb_headroom(skb) < hh_len) {
3136e6fddc7SDaniel Borkmann 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
3146e6fddc7SDaniel Borkmann 
3156e6fddc7SDaniel Borkmann 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
316b0b9395dSStanislav Fomichev 				ret = -ENOMEM;
317b0b9395dSStanislav Fomichev 				goto out;
3186e6fddc7SDaniel Borkmann 			}
3196e6fddc7SDaniel Borkmann 		}
3206e6fddc7SDaniel Borkmann 		memset(__skb_push(skb, hh_len), 0, hh_len);
3216e6fddc7SDaniel Borkmann 	}
322b0b9395dSStanislav Fomichev 	convert_skb_to___skb(skb, ctx);
3236e6fddc7SDaniel Borkmann 
3241cf1cae9SAlexei Starovoitov 	size = skb->len;
3251cf1cae9SAlexei Starovoitov 	/* bpf program can never convert linear skb to non-linear */
3261cf1cae9SAlexei Starovoitov 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
3271cf1cae9SAlexei Starovoitov 		size = skb_headlen(skb);
32878e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
329b0b9395dSStanislav Fomichev 	if (!ret)
330b0b9395dSStanislav Fomichev 		ret = bpf_ctx_finish(kattr, uattr, ctx,
331b0b9395dSStanislav Fomichev 				     sizeof(struct __sk_buff));
332b0b9395dSStanislav Fomichev out:
3331cf1cae9SAlexei Starovoitov 	kfree_skb(skb);
3342cb494a3SSong Liu 	kfree(sk);
335b0b9395dSStanislav Fomichev 	kfree(ctx);
3361cf1cae9SAlexei Starovoitov 	return ret;
3371cf1cae9SAlexei Starovoitov }
3381cf1cae9SAlexei Starovoitov 
3391cf1cae9SAlexei Starovoitov int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
3401cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
3411cf1cae9SAlexei Starovoitov {
3421cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
3431cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
34465073a67SDaniel Borkmann 	struct netdev_rx_queue *rxqueue;
3451cf1cae9SAlexei Starovoitov 	struct xdp_buff xdp = {};
3461cf1cae9SAlexei Starovoitov 	u32 retval, duration;
3471cf1cae9SAlexei Starovoitov 	void *data;
3481cf1cae9SAlexei Starovoitov 	int ret;
3491cf1cae9SAlexei Starovoitov 
350947e8b59SStanislav Fomichev 	if (kattr->test.ctx_in || kattr->test.ctx_out)
351947e8b59SStanislav Fomichev 		return -EINVAL;
352947e8b59SStanislav Fomichev 
353586f8525SDavid Miller 	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
3541cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
3551cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
3561cf1cae9SAlexei Starovoitov 
3571cf1cae9SAlexei Starovoitov 	xdp.data_hard_start = data;
358586f8525SDavid Miller 	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
359de8f3a83SDaniel Borkmann 	xdp.data_meta = xdp.data;
3601cf1cae9SAlexei Starovoitov 	xdp.data_end = xdp.data + size;
3611cf1cae9SAlexei Starovoitov 
36265073a67SDaniel Borkmann 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
36365073a67SDaniel Borkmann 	xdp.rxq = &rxqueue->xdp_rxq;
36465073a67SDaniel Borkmann 
365dcb40590SRoman Gushchin 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
366dcb40590SRoman Gushchin 	if (ret)
367dcb40590SRoman Gushchin 		goto out;
368587b80ccSNikita V. Shirokov 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
369587b80ccSNikita V. Shirokov 	    xdp.data_end != xdp.data + size)
3701cf1cae9SAlexei Starovoitov 		size = xdp.data_end - xdp.data;
37178e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
372dcb40590SRoman Gushchin out:
3731cf1cae9SAlexei Starovoitov 	kfree(data);
3741cf1cae9SAlexei Starovoitov 	return ret;
3751cf1cae9SAlexei Starovoitov }
376b7a1848eSStanislav Fomichev 
377b7a1848eSStanislav Fomichev int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
378b7a1848eSStanislav Fomichev 				     const union bpf_attr *kattr,
379b7a1848eSStanislav Fomichev 				     union bpf_attr __user *uattr)
380b7a1848eSStanislav Fomichev {
381b7a1848eSStanislav Fomichev 	u32 size = kattr->test.data_size_in;
382*7b8a1304SStanislav Fomichev 	struct bpf_flow_dissector ctx = {};
383b7a1848eSStanislav Fomichev 	u32 repeat = kattr->test.repeat;
384b7a1848eSStanislav Fomichev 	struct bpf_flow_keys flow_keys;
385b7a1848eSStanislav Fomichev 	u64 time_start, time_spent = 0;
386*7b8a1304SStanislav Fomichev 	const struct ethhdr *eth;
387b7a1848eSStanislav Fomichev 	u32 retval, duration;
388b7a1848eSStanislav Fomichev 	void *data;
389b7a1848eSStanislav Fomichev 	int ret;
390b7a1848eSStanislav Fomichev 	u32 i;
391b7a1848eSStanislav Fomichev 
392b7a1848eSStanislav Fomichev 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
393b7a1848eSStanislav Fomichev 		return -EINVAL;
394b7a1848eSStanislav Fomichev 
395947e8b59SStanislav Fomichev 	if (kattr->test.ctx_in || kattr->test.ctx_out)
396947e8b59SStanislav Fomichev 		return -EINVAL;
397947e8b59SStanislav Fomichev 
398*7b8a1304SStanislav Fomichev 	if (size < ETH_HLEN)
399*7b8a1304SStanislav Fomichev 		return -EINVAL;
400*7b8a1304SStanislav Fomichev 
401*7b8a1304SStanislav Fomichev 	data = bpf_test_init(kattr, size, 0, 0);
402b7a1848eSStanislav Fomichev 	if (IS_ERR(data))
403b7a1848eSStanislav Fomichev 		return PTR_ERR(data);
404b7a1848eSStanislav Fomichev 
405*7b8a1304SStanislav Fomichev 	eth = (struct ethhdr *)data;
406b7a1848eSStanislav Fomichev 
407b7a1848eSStanislav Fomichev 	if (!repeat)
408b7a1848eSStanislav Fomichev 		repeat = 1;
409b7a1848eSStanislav Fomichev 
410*7b8a1304SStanislav Fomichev 	ctx.flow_keys = &flow_keys;
411*7b8a1304SStanislav Fomichev 	ctx.data = data;
412*7b8a1304SStanislav Fomichev 	ctx.data_end = (__u8 *)data + size;
413*7b8a1304SStanislav Fomichev 
414a439184dSStanislav Fomichev 	rcu_read_lock();
415a439184dSStanislav Fomichev 	preempt_disable();
416b7a1848eSStanislav Fomichev 	time_start = ktime_get_ns();
417b7a1848eSStanislav Fomichev 	for (i = 0; i < repeat; i++) {
418*7b8a1304SStanislav Fomichev 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
419*7b8a1304SStanislav Fomichev 					  size);
420*7b8a1304SStanislav Fomichev 
421*7b8a1304SStanislav Fomichev 		flow_keys.nhoff -= ETH_HLEN;
422*7b8a1304SStanislav Fomichev 		flow_keys.thoff -= ETH_HLEN;
423a439184dSStanislav Fomichev 
424a439184dSStanislav Fomichev 		if (signal_pending(current)) {
425b7a1848eSStanislav Fomichev 			preempt_enable();
426a439184dSStanislav Fomichev 			rcu_read_unlock();
427a439184dSStanislav Fomichev 
428a439184dSStanislav Fomichev 			ret = -EINTR;
429a439184dSStanislav Fomichev 			goto out;
430a439184dSStanislav Fomichev 		}
431b7a1848eSStanislav Fomichev 
432b7a1848eSStanislav Fomichev 		if (need_resched()) {
433b7a1848eSStanislav Fomichev 			time_spent += ktime_get_ns() - time_start;
434a439184dSStanislav Fomichev 			preempt_enable();
435a439184dSStanislav Fomichev 			rcu_read_unlock();
436a439184dSStanislav Fomichev 
437b7a1848eSStanislav Fomichev 			cond_resched();
438a439184dSStanislav Fomichev 
439a439184dSStanislav Fomichev 			rcu_read_lock();
440a439184dSStanislav Fomichev 			preempt_disable();
441b7a1848eSStanislav Fomichev 			time_start = ktime_get_ns();
442b7a1848eSStanislav Fomichev 		}
443b7a1848eSStanislav Fomichev 	}
444b7a1848eSStanislav Fomichev 	time_spent += ktime_get_ns() - time_start;
445a439184dSStanislav Fomichev 	preempt_enable();
446a439184dSStanislav Fomichev 	rcu_read_unlock();
447a439184dSStanislav Fomichev 
448b7a1848eSStanislav Fomichev 	do_div(time_spent, repeat);
449b7a1848eSStanislav Fomichev 	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
450b7a1848eSStanislav Fomichev 
451b7a1848eSStanislav Fomichev 	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
452b7a1848eSStanislav Fomichev 			      retval, duration);
453b7a1848eSStanislav Fomichev 
454a439184dSStanislav Fomichev out:
455*7b8a1304SStanislav Fomichev 	kfree(data);
456b7a1848eSStanislav Fomichev 	return ret;
457b7a1848eSStanislav Fomichev }
458