xref: /openbmc/linux/net/bpf/test_run.c (revision b0b9395d865e3060d97658fbc9ba3f77fecc8da1)
11cf1cae9SAlexei Starovoitov /* Copyright (c) 2017 Facebook
21cf1cae9SAlexei Starovoitov  *
31cf1cae9SAlexei Starovoitov  * This program is free software; you can redistribute it and/or
41cf1cae9SAlexei Starovoitov  * modify it under the terms of version 2 of the GNU General Public
51cf1cae9SAlexei Starovoitov  * License as published by the Free Software Foundation.
61cf1cae9SAlexei Starovoitov  */
71cf1cae9SAlexei Starovoitov #include <linux/bpf.h>
81cf1cae9SAlexei Starovoitov #include <linux/slab.h>
91cf1cae9SAlexei Starovoitov #include <linux/vmalloc.h>
101cf1cae9SAlexei Starovoitov #include <linux/etherdevice.h>
111cf1cae9SAlexei Starovoitov #include <linux/filter.h>
121cf1cae9SAlexei Starovoitov #include <linux/sched/signal.h>
132cb494a3SSong Liu #include <net/sock.h>
142cb494a3SSong Liu #include <net/tcp.h>
151cf1cae9SAlexei Starovoitov 
/* Run @prog @repeat times against @ctx under rcu_read_lock() with
 * preemption disabled, mimicking the context a real BPF hook runs in.
 * The last program return value is reported through @retval and the mean
 * per-invocation wall time (ns, capped at U32_MAX) through @time.
 *
 * Returns 0 on success, -ENOMEM if cgroup storage allocation fails, or
 * -EINTR if a signal became pending during the run.
 */
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	/* Pre-allocate every cgroup storage type up front.  On failure the
	 * failing slot is reset to NULL and *all* slots are passed to
	 * bpf_cgroup_storage_free() — the not-yet-allocated ones are still
	 * NULL from the initializer, so the free path must tolerate NULL.
	 */
	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		/* Long repeat counts must stay interruptible. */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/* If a reschedule is due, stop the clock and drop both
		 * RCU and preemption before yielding, then re-enter the
		 * run context and restart timing so the wait is not
		 * charged to the program.
		 */
		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	/* Report the mean duration per run, saturated to 32 bits. */
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
741cf1cae9SAlexei Starovoitov 
7578e52272SDavid Miller static int bpf_test_finish(const union bpf_attr *kattr,
7678e52272SDavid Miller 			   union bpf_attr __user *uattr, const void *data,
771cf1cae9SAlexei Starovoitov 			   u32 size, u32 retval, u32 duration)
781cf1cae9SAlexei Starovoitov {
7978e52272SDavid Miller 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
801cf1cae9SAlexei Starovoitov 	int err = -EFAULT;
81b5a36b1eSLorenz Bauer 	u32 copy_size = size;
821cf1cae9SAlexei Starovoitov 
83b5a36b1eSLorenz Bauer 	/* Clamp copy if the user has provided a size hint, but copy the full
84b5a36b1eSLorenz Bauer 	 * buffer if not to retain old behaviour.
85b5a36b1eSLorenz Bauer 	 */
86b5a36b1eSLorenz Bauer 	if (kattr->test.data_size_out &&
87b5a36b1eSLorenz Bauer 	    copy_size > kattr->test.data_size_out) {
88b5a36b1eSLorenz Bauer 		copy_size = kattr->test.data_size_out;
89b5a36b1eSLorenz Bauer 		err = -ENOSPC;
90b5a36b1eSLorenz Bauer 	}
91b5a36b1eSLorenz Bauer 
92b5a36b1eSLorenz Bauer 	if (data_out && copy_to_user(data_out, data, copy_size))
931cf1cae9SAlexei Starovoitov 		goto out;
941cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
951cf1cae9SAlexei Starovoitov 		goto out;
961cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
971cf1cae9SAlexei Starovoitov 		goto out;
981cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
991cf1cae9SAlexei Starovoitov 		goto out;
100b5a36b1eSLorenz Bauer 	if (err != -ENOSPC)
1011cf1cae9SAlexei Starovoitov 		err = 0;
1021cf1cae9SAlexei Starovoitov out:
1031cf1cae9SAlexei Starovoitov 	return err;
1041cf1cae9SAlexei Starovoitov }
1051cf1cae9SAlexei Starovoitov 
1061cf1cae9SAlexei Starovoitov static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
1071cf1cae9SAlexei Starovoitov 			   u32 headroom, u32 tailroom)
1081cf1cae9SAlexei Starovoitov {
1091cf1cae9SAlexei Starovoitov 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1101cf1cae9SAlexei Starovoitov 	void *data;
1111cf1cae9SAlexei Starovoitov 
1121cf1cae9SAlexei Starovoitov 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
1131cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EINVAL);
1141cf1cae9SAlexei Starovoitov 
1151cf1cae9SAlexei Starovoitov 	data = kzalloc(size + headroom + tailroom, GFP_USER);
1161cf1cae9SAlexei Starovoitov 	if (!data)
1171cf1cae9SAlexei Starovoitov 		return ERR_PTR(-ENOMEM);
1181cf1cae9SAlexei Starovoitov 
1191cf1cae9SAlexei Starovoitov 	if (copy_from_user(data + headroom, data_in, size)) {
1201cf1cae9SAlexei Starovoitov 		kfree(data);
1211cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EFAULT);
1221cf1cae9SAlexei Starovoitov 	}
1231cf1cae9SAlexei Starovoitov 	return data;
1241cf1cae9SAlexei Starovoitov }
1251cf1cae9SAlexei Starovoitov 
126*b0b9395dSStanislav Fomichev static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
127*b0b9395dSStanislav Fomichev {
128*b0b9395dSStanislav Fomichev 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
129*b0b9395dSStanislav Fomichev 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
130*b0b9395dSStanislav Fomichev 	u32 size = kattr->test.ctx_size_in;
131*b0b9395dSStanislav Fomichev 	void *data;
132*b0b9395dSStanislav Fomichev 	int err;
133*b0b9395dSStanislav Fomichev 
134*b0b9395dSStanislav Fomichev 	if (!data_in && !data_out)
135*b0b9395dSStanislav Fomichev 		return NULL;
136*b0b9395dSStanislav Fomichev 
137*b0b9395dSStanislav Fomichev 	data = kzalloc(max_size, GFP_USER);
138*b0b9395dSStanislav Fomichev 	if (!data)
139*b0b9395dSStanislav Fomichev 		return ERR_PTR(-ENOMEM);
140*b0b9395dSStanislav Fomichev 
141*b0b9395dSStanislav Fomichev 	if (data_in) {
142*b0b9395dSStanislav Fomichev 		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
143*b0b9395dSStanislav Fomichev 		if (err) {
144*b0b9395dSStanislav Fomichev 			kfree(data);
145*b0b9395dSStanislav Fomichev 			return ERR_PTR(err);
146*b0b9395dSStanislav Fomichev 		}
147*b0b9395dSStanislav Fomichev 
148*b0b9395dSStanislav Fomichev 		size = min_t(u32, max_size, size);
149*b0b9395dSStanislav Fomichev 		if (copy_from_user(data, data_in, size)) {
150*b0b9395dSStanislav Fomichev 			kfree(data);
151*b0b9395dSStanislav Fomichev 			return ERR_PTR(-EFAULT);
152*b0b9395dSStanislav Fomichev 		}
153*b0b9395dSStanislav Fomichev 	}
154*b0b9395dSStanislav Fomichev 	return data;
155*b0b9395dSStanislav Fomichev }
156*b0b9395dSStanislav Fomichev 
157*b0b9395dSStanislav Fomichev static int bpf_ctx_finish(const union bpf_attr *kattr,
158*b0b9395dSStanislav Fomichev 			  union bpf_attr __user *uattr, const void *data,
159*b0b9395dSStanislav Fomichev 			  u32 size)
160*b0b9395dSStanislav Fomichev {
161*b0b9395dSStanislav Fomichev 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
162*b0b9395dSStanislav Fomichev 	int err = -EFAULT;
163*b0b9395dSStanislav Fomichev 	u32 copy_size = size;
164*b0b9395dSStanislav Fomichev 
165*b0b9395dSStanislav Fomichev 	if (!data || !data_out)
166*b0b9395dSStanislav Fomichev 		return 0;
167*b0b9395dSStanislav Fomichev 
168*b0b9395dSStanislav Fomichev 	if (copy_size > kattr->test.ctx_size_out) {
169*b0b9395dSStanislav Fomichev 		copy_size = kattr->test.ctx_size_out;
170*b0b9395dSStanislav Fomichev 		err = -ENOSPC;
171*b0b9395dSStanislav Fomichev 	}
172*b0b9395dSStanislav Fomichev 
173*b0b9395dSStanislav Fomichev 	if (copy_to_user(data_out, data, copy_size))
174*b0b9395dSStanislav Fomichev 		goto out;
175*b0b9395dSStanislav Fomichev 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
176*b0b9395dSStanislav Fomichev 		goto out;
177*b0b9395dSStanislav Fomichev 	if (err != -ENOSPC)
178*b0b9395dSStanislav Fomichev 		err = 0;
179*b0b9395dSStanislav Fomichev out:
180*b0b9395dSStanislav Fomichev 	return err;
181*b0b9395dSStanislav Fomichev }
182*b0b9395dSStanislav Fomichev 
183*b0b9395dSStanislav Fomichev /**
184*b0b9395dSStanislav Fomichev  * range_is_zero - test whether buffer is initialized
185*b0b9395dSStanislav Fomichev  * @buf: buffer to check
186*b0b9395dSStanislav Fomichev  * @from: check from this position
187*b0b9395dSStanislav Fomichev  * @to: check up until (excluding) this position
188*b0b9395dSStanislav Fomichev  *
189*b0b9395dSStanislav Fomichev  * This function returns true if the there is a non-zero byte
190*b0b9395dSStanislav Fomichev  * in the buf in the range [from,to).
191*b0b9395dSStanislav Fomichev  */
192*b0b9395dSStanislav Fomichev static inline bool range_is_zero(void *buf, size_t from, size_t to)
193*b0b9395dSStanislav Fomichev {
194*b0b9395dSStanislav Fomichev 	return !memchr_inv((u8 *)buf + from, 0, to - from);
195*b0b9395dSStanislav Fomichev }
196*b0b9395dSStanislav Fomichev 
/* Apply the user-supplied __sk_buff context @__skb onto the kernel @skb
 * the test will run against.  Only ->priority and ->cb are honoured;
 * any other non-zero byte in the struct is rejected with -EINVAL so
 * fields we do not (yet) support cannot be silently ignored.
 *
 * Returns 0 on success (including when no context was supplied).
 */
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	/* No user context — nothing to apply. */
	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	/* Everything between priority and cb must be untouched. */
	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
			   FIELD_SIZEOF(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	/* ... as must everything after cb to the end of the struct. */
	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
			   FIELD_SIZEOF(struct __sk_buff, cb),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->priority = __skb->priority;
	/* cb lands in the qdisc private area, matching what SCHED_CLS
	 * programs see at attach time.
	 */
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	return 0;
}
227*b0b9395dSStanislav Fomichev 
228*b0b9395dSStanislav Fomichev static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
229*b0b9395dSStanislav Fomichev {
230*b0b9395dSStanislav Fomichev 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
231*b0b9395dSStanislav Fomichev 
232*b0b9395dSStanislav Fomichev 	if (!__skb)
233*b0b9395dSStanislav Fomichev 		return;
234*b0b9395dSStanislav Fomichev 
235*b0b9395dSStanislav Fomichev 	__skb->priority = skb->priority;
236*b0b9395dSStanislav Fomichev 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
237*b0b9395dSStanislav Fomichev }
238*b0b9395dSStanislav Fomichev 
/* BPF_PROG_TEST_RUN handler for skb-based program types: builds a real
 * sk_buff around the user-supplied packet, runs the program via
 * bpf_test_run(), and copies the resulting packet, context, return
 * value and timing back to user space.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	/* Room for skb headroom plus the shared-info tail, like a real
	 * driver allocation.
	 */
	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Optional user-supplied __sk_buff context (may be NULL). */
	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	/* L2 program types see the MAC header; all listed types get
	 * direct packet access pointers set up.
	 */
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* Minimal dummy socket so programs touching skb->sk don't crash;
	 * freed with plain kfree() below, so it must never be hashed or
	 * hold references.
	 */
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	/* build_skb() takes ownership of @data on success; on failure we
	 * still own it and must free it ourselves.
	 */
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	/* eth_type_trans() pulls the Ethernet header and sets protocol,
	 * as if the packet arrived on loopback.
	 */
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);	/* L2 progs expect MAC header in data */
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	/* For non-L2 types re-grow a zeroed MAC header so the output
	 * buffer copied to user space always starts at the MAC header,
	 * expanding headroom first if the program consumed it.
	 */
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	/* kfree_skb() also frees the packet buffer owned by the skb. */
	kfree_skb(skb);
	kfree(sk);
	kfree(ctx);
	return ret;
}
3381cf1cae9SAlexei Starovoitov 
3391cf1cae9SAlexei Starovoitov int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
3401cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
3411cf1cae9SAlexei Starovoitov {
3421cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
3431cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
34465073a67SDaniel Borkmann 	struct netdev_rx_queue *rxqueue;
3451cf1cae9SAlexei Starovoitov 	struct xdp_buff xdp = {};
3461cf1cae9SAlexei Starovoitov 	u32 retval, duration;
3471cf1cae9SAlexei Starovoitov 	void *data;
3481cf1cae9SAlexei Starovoitov 	int ret;
3491cf1cae9SAlexei Starovoitov 
350586f8525SDavid Miller 	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
3511cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
3521cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
3531cf1cae9SAlexei Starovoitov 
3541cf1cae9SAlexei Starovoitov 	xdp.data_hard_start = data;
355586f8525SDavid Miller 	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
356de8f3a83SDaniel Borkmann 	xdp.data_meta = xdp.data;
3571cf1cae9SAlexei Starovoitov 	xdp.data_end = xdp.data + size;
3581cf1cae9SAlexei Starovoitov 
35965073a67SDaniel Borkmann 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
36065073a67SDaniel Borkmann 	xdp.rxq = &rxqueue->xdp_rxq;
36165073a67SDaniel Borkmann 
362dcb40590SRoman Gushchin 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
363dcb40590SRoman Gushchin 	if (ret)
364dcb40590SRoman Gushchin 		goto out;
365587b80ccSNikita V. Shirokov 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
366587b80ccSNikita V. Shirokov 	    xdp.data_end != xdp.data + size)
3671cf1cae9SAlexei Starovoitov 		size = xdp.data_end - xdp.data;
36878e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
369dcb40590SRoman Gushchin out:
3701cf1cae9SAlexei Starovoitov 	kfree(data);
3711cf1cae9SAlexei Starovoitov 	return ret;
3721cf1cae9SAlexei Starovoitov }
373b7a1848eSStanislav Fomichev 
/* BPF_PROG_TEST_RUN handler for flow-dissector programs.  Builds an skb
 * around the user packet, points the qdisc cb area at a local
 * bpf_flow_keys, and runs __skb_flow_bpf_dissect() @repeat times under
 * RCU with preemption disabled (open-coded rather than via
 * bpf_test_run(), since the dissector is invoked through its own entry
 * point).  The resulting flow_keys struct is what gets copied back as
 * the test "data" output.
 */
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	struct bpf_skb_data_end *cb;
	u32 retval, duration;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Dummy socket, freed with plain kfree() — same caveats as in
	 * bpf_prog_test_run_skb().
	 */
	sk = kzalloc(sizeof(*sk), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	/* build_skb() owns @data on success; free it ourselves on failure. */
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb,
				       current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	/* Hand the on-stack flow_keys to the dissector via skb->cb. */
	cb = (struct bpf_skb_data_end *)skb->cb;
	cb->qdisc_cb.flow_keys = &flow_keys;

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = __skb_flow_bpf_dissect(prog, skb,
						&flow_keys_dissector,
						&flow_keys);

		/* Abort interruptibly on a pending signal; locks must be
		 * dropped before the out: cleanup path.
		 */
		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		/* Yield when needed, excluding the wait from timing, then
		 * re-enter RCU/non-preempt context and restart the clock.
		 */
		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	/* Mean per-run duration, saturated to 32 bits. */
	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

out:
	kfree_skb(skb);
	kfree(sk);
	return ret;
}
469