xref: /openbmc/linux/net/bpf/test_run.c (revision c48e51c8b07aba8a18125221cb67a40cb1256bf2)
125763b3cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21cf1cae9SAlexei Starovoitov /* Copyright (c) 2017 Facebook
31cf1cae9SAlexei Starovoitov  */
41cf1cae9SAlexei Starovoitov #include <linux/bpf.h>
5*c48e51c8SKumar Kartikeya Dwivedi #include <linux/btf.h>
67bd1590dSMartin KaFai Lau #include <linux/btf_ids.h>
71cf1cae9SAlexei Starovoitov #include <linux/slab.h>
81cf1cae9SAlexei Starovoitov #include <linux/vmalloc.h>
91cf1cae9SAlexei Starovoitov #include <linux/etherdevice.h>
101cf1cae9SAlexei Starovoitov #include <linux/filter.h>
1187b7b533SYonghong Song #include <linux/rcupdate_trace.h>
121cf1cae9SAlexei Starovoitov #include <linux/sched/signal.h>
136ac99e8fSMartin KaFai Lau #include <net/bpf_sk_storage.h>
142cb494a3SSong Liu #include <net/sock.h>
152cb494a3SSong Liu #include <net/tcp.h>
167c32e8f8SLorenz Bauer #include <net/net_namespace.h>
173d08b6f2SKP Singh #include <linux/error-injection.h>
181b4d60ecSSong Liu #include <linux/smp.h>
197c32e8f8SLorenz Bauer #include <linux/sock_diag.h>
2047316f4aSZvi Effron #include <net/xdp.h>
211cf1cae9SAlexei Starovoitov 
22e950e843SMatt Mullins #define CREATE_TRACE_POINTS
23e950e843SMatt Mullins #include <trace/events/bpf_test_run.h>
24e950e843SMatt Mullins 
/* State for timing repeated test runs of a BPF program.
 *
 * @mode:       whether a measurement window runs with preemption disabled
 *              (NO_PREEMPT) or only migration disabled (NO_MIGRATE).
 * @i:          number of iterations completed so far.
 * @time_start: ktime_get_ns() timestamp of the current measurement window.
 * @time_spent: run time in ns accumulated across measurement windows.
 */
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};
30607b9cc9SLorenz Bauer 
/* Open a measurement window: take the RCU read lock, disable preemption
 * or migration depending on t->mode, then record the start timestamp.
 * Must be paired with bpf_test_timer_leave(); the timestamp is taken
 * last so setup cost is not attributed to the program under test.
 */
static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}
42607b9cc9SLorenz Bauer 
/* Close a measurement window opened by bpf_test_timer_enter(): clear the
 * start timestamp, re-enable preemption or migration (matching t->mode),
 * and drop the RCU read lock.  Callers accumulate t->time_spent before
 * calling this; the timestamp itself is simply invalidated here.
 */
static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}
54607b9cc9SLorenz Bauer 
/* Account one completed program run and decide whether to keep going.
 *
 * Returns true if the caller should perform another run.  When t->i
 * reaches @repeat, the average run time in ns is computed, clamped to
 * U32_MAX and stored in *@duration, *@err is set to 0, and false is
 * returned.  A pending signal aborts the loop with *@err = -EINTR.
 * On the "run again" path *@err is left untouched.  If rescheduling is
 * needed, the timer section (RCU + preempt/migrate protection) is left
 * around cond_resched() and re-entered, with the elapsed time banked
 * into t->time_spent first so the sleep is not counted.
 */
static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
89607b9cc9SLorenz Bauer 
/* Run @prog @repeat times (at least once) against @ctx, timing the runs.
 *
 * Allocates cgroup storage for every storage type the program uses,
 * installs a bpf_cg_run_ctx so storage helpers resolve against it, and
 * executes the program inside a bpf_test_timer section (RCU read lock,
 * migration disabled).  @xdp selects bpf_prog_run_xdp() over
 * bpf_prog_run().  The last run's return code lands in *@retval and the
 * average per-run time in ns in *@time.
 *
 * Returns 0 on success, -EINTR if interrupted by a signal, or -ENOMEM
 * if cgroup storage allocation fails.
 */
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			/* Unwind: item was zero-initialized, so slots not yet
			 * allocated are NULL; relies on
			 * bpf_cgroup_storage_free() tolerating NULL.
			 */
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}
1301cf1cae9SAlexei Starovoitov 
/* Copy test results back to user space: the output packet @data (@size
 * bytes, clamped to the user's data_size_out hint), the full output
 * size, the program's @retval and the measured @duration.
 *
 * Returns 0 on success, -ENOSPC if the output was truncated to fit the
 * user buffer, -EFAULT if any copy_to_user() fails.
 */
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	/* Always report the untruncated size so the caller can resize. */
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	/* Preserve -ENOSPC from the clamp above; otherwise success. */
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
1621cf1cae9SAlexei Starovoitov 
163faeb2dceSAlexei Starovoitov /* Integer types of various sizes and pointer combinations cover variety of
164faeb2dceSAlexei Starovoitov  * architecture dependent calling conventions. 7+ can be supported in the
165faeb2dceSAlexei Starovoitov  * future.
166faeb2dceSAlexei Starovoitov  */
167e9ff9d52SJean-Philippe Menil __diag_push();
168e9ff9d52SJean-Philippe Menil __diag_ignore(GCC, 8, "-Wmissing-prototypes",
169e9ff9d52SJean-Philippe Menil 	      "Global functions as their definitions will be in vmlinux BTF");
/* fentry/fexit attach target; bpf_prog_test_run_tracing() checks that
 * bpf_fentry_test1(1) == 2.  noinline keeps it attachable.
 */
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
174faeb2dceSAlexei Starovoitov 
/* fentry/fexit attach target exercising an (int, u64) calling convention;
 * bpf_prog_test_run_tracing() checks bpf_fentry_test2(2, 3) == 5.
 */
int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}
179faeb2dceSAlexei Starovoitov 
/* fentry/fexit attach target exercising a (char, int, u64) calling
 * convention; bpf_prog_test_run_tracing() checks the sum of its args.
 */
int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}
184faeb2dceSAlexei Starovoitov 
/* fentry/fexit attach target mixing a pointer with integer arguments;
 * the pointer is treated as a plain number in the checked sum.
 */
int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}
189faeb2dceSAlexei Starovoitov 
/* fentry/fexit attach target with five mixed-width arguments; the
 * pointer is treated as a plain number in the checked sum.
 */
int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}
194faeb2dceSAlexei Starovoitov 
/* fentry/fexit attach target with six arguments (the architecture
 * register-argument limit exercised here); pointers summed as numbers.
 */
int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}
199faeb2dceSAlexei Starovoitov 
/* Self-referential struct used to exercise BTF pointer arguments in the
 * fentry test targets below.
 */
struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};
203d923021cSYonghong Song 
/* fentry/fexit attach target taking a struct pointer; returns the
 * pointer value itself (called with NULL by the tracing test runner).
 */
int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}
208d923021cSYonghong Song 
/* fentry/fexit attach target that dereferences its struct argument;
 * the tracing test runner passes a zeroed struct, so ->a is NULL and
 * the expected return is 0.
 */
int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}
213d923021cSYonghong Song 
/* BPF_MODIFY_RETURN attach target: increments *b and returns a + *b.
 * The side effect on *b lets the test runner detect whether an attached
 * fmod_ret program suppressed execution of the function body.
 */
int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
2197bd1590dSMartin KaFai Lau 
/* kfunc callable from test BPF programs (listed in test_sk_kfunc_ids);
 * sums four scalar args, ignoring @sk.
 */
u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}
2247bd1590dSMartin KaFai Lau 
/* kfunc callable from test BPF programs (listed in test_sk_kfunc_ids);
 * sums two scalar args, ignoring @sk.
 */
int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}
2297bd1590dSMartin KaFai Lau 
/* kfunc callable from test BPF programs (listed in test_sk_kfunc_ids);
 * identity function on a socket pointer, exercising pointer returns.
 */
struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}
2347bd1590dSMartin KaFai Lau 
235e9ff9d52SJean-Philippe Menil __diag_pop();
2363d08b6f2SKP Singh 
/* Let fmod_ret programs override bpf_modify_return_test()'s errno-style
 * return value via the error-injection framework.
 */
ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

/* BTF ID set of the built-in kfuncs test BPF programs may call. */
BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)
2447bd1590dSMartin KaFai Lau 
/* Return true if @kfunc_id may be called by a test_run BPF program:
 * either it is in the built-in test_sk_kfunc_ids set, or a module
 * registered it on prog_test_kfunc_list (presumably declared elsewhere
 * in this file/subsystem), checked against @owner.
 */
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner)
{
	if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id))
		return true;
	return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner);
}
2517bd1590dSMartin KaFai Lau 
/* Allocate the test packet buffer and fill it from user space.
 *
 * @size is the requested packet length: it must be at least ETH_HLEN
 * and, together with @headroom and @tailroom, fit in one page
 * (-EINVAL otherwise).  The user's data_size_in bytes are copied in
 * after @headroom; user data longer than @size is rejected with
 * -EMSGSIZE.  The buffer is zero-allocated, so bytes past the user
 * data read as zero.
 *
 * Returns the buffer (caller owns/frees) or an ERR_PTR on failure.
 */
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
276da00d2f1SKP Singh 
/* BPF_PROG_RUN handler for tracing programs (fentry/fexit/fmod_ret).
 *
 * Instead of running @prog directly, this invokes the kernel test
 * targets the program attaches to and observes the (possibly modified)
 * results.  The user-visible retval packs the fmod_ret side-effect flag
 * in the upper 16 bits and the target's return code in the lower 16.
 *
 * Returns 0 on success, -EINVAL for unsupported flags/cpu or attach
 * types, -EFAULT on copy-out failure or unexpected target results.
 */
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		/* Call every fentry target with fixed inputs; any mismatch
		 * from the expected sums means the attached program (or the
		 * trampoline) corrupted arguments or return values.
		 */
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		/* If b is still 2, the target body never ran: the attached
		 * fmod_ret program overrode the call.
		 */
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
3201cf1cae9SAlexei Starovoitov 
/* Arguments/result bundle passed to __bpf_prog_test_run_raw_tp(),
 * needed because the run may happen via smp_call_function_single().
 *
 * @prog:   program to run.
 * @ctx:    kernel copy of the user-supplied context (may be NULL).
 * @retval: program's return value, filled in by the run.
 */
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};
3261b4d60ecSSong Liu 
/* Run info->prog once under the RCU read lock, storing its return value
 * in info->retval.  @data is a struct bpf_raw_tp_test_run_info *; the
 * void* signature matches the smp_call_function_single() callback type.
 */
static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}
3361b4d60ecSSong Liu 
/* BPF_PROG_RUN handler for raw tracepoint programs.
 *
 * Copies ctx_in into a kernel buffer and runs @prog exactly once,
 * either on the current CPU or — when BPF_F_TEST_RUN_ON_CPU is set —
 * pinned to the requested CPU via smp_call_function_single().
 * data_in/out, ctx_out, duration and repeat are not supported.
 *
 * Returns 0 on success; -EINVAL for unsupported attrs, a too-small or
 * too-large ctx, or a nonzero cpu without BPF_F_TEST_RUN_ON_CPU;
 * -ENXIO for an offline/out-of-range CPU; -ENOMEM/-EFAULT for ctx
 * allocation/copy failures.
 */
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	/* ctx must cover every offset the program reads, and fit the
	 * raw-tp argument array bound.
	 */
	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	/* get_cpu() also disables preemption so current_cpu stays valid
	 * for the comparison below.
	 */
	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}
3991b4d60ecSSong Liu 
/* Copy the user-supplied context (test.ctx_in) into a kernel buffer.
 *
 * Returns NULL when the test sets neither ctx_in nor ctx_out, an
 * ERR_PTR on failure, otherwise a zero-filled buffer of @max_size bytes
 * (caller frees) holding min(ctx_size_in, max_size) bytes of user data.
 * bpf_check_uarg_tail_zero() rejects input where bytes past @max_size
 * are nonzero, so silently truncating to @max_size is safe.
 */
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
430b0b9395dSStanislav Fomichev 
/* Copy the (possibly modified) context back to test.ctx_out, clamped to
 * the user's ctx_size_out, and report the full size so the caller can
 * detect truncation.
 *
 * Returns 0 when there is nothing to copy or all copies succeed,
 * -ENOSPC if the output was truncated, -EFAULT on copy failure.
 */
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	/* Preserve -ENOSPC from the clamp above; otherwise success. */
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
456b0b9395dSStanislav Fomichev 
457b0b9395dSStanislav Fomichev /**
458b0b9395dSStanislav Fomichev  * range_is_zero - test whether buffer is initialized
459b0b9395dSStanislav Fomichev  * @buf: buffer to check
460b0b9395dSStanislav Fomichev  * @from: check from this position
461b0b9395dSStanislav Fomichev  * @to: check up until (excluding) this position
462b0b9395dSStanislav Fomichev  *
463b0b9395dSStanislav Fomichev  * This function returns true if the there is a non-zero byte
464b0b9395dSStanislav Fomichev  * in the buf in the range [from,to).
465b0b9395dSStanislav Fomichev  */
466b0b9395dSStanislav Fomichev static inline bool range_is_zero(void *buf, size_t from, size_t to)
467b0b9395dSStanislav Fomichev {
468b0b9395dSStanislav Fomichev 	return !memchr_inv((u8 *)buf + from, 0, to - from);
469b0b9395dSStanislav Fomichev }
470b0b9395dSStanislav Fomichev 
/* Populate a real sk_buff from the user-supplied __sk_buff context.
 *
 * Only a whitelisted subset of __sk_buff fields may be set by user
 * space: mark, priority, ingress_ifindex, ifindex, cb, tstamp,
 * wire_len, gso_segs, gso_size and hwtstamp.  Every gap between the
 * allowed fields must be zero, enforced field-by-field with
 * range_is_zero(); otherwise -EINVAL is returned.  wire_len, if given,
 * must cover skb->len and not exceed GSO_MAX_SIZE.
 *
 * Returns 0 on success (including a NULL @__skb), -EINVAL on any
 * disallowed or out-of-range field.
 */
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	/* All checks passed: apply the whitelisted fields to the skb. */
	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		/* Default: on-the-wire length equals the linear skb length. */
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}
545b0b9395dSStanislav Fomichev 
/* Copy result fields from the sk_buff back into the __sk_buff context
 * that will be returned to user space (inverse of
 * convert___skb_to_skb() for the writable fields, plus read-only
 * outputs such as ifindex).  No-op when @__skb is NULL.
 */
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}
563b0b9395dSStanislav Fomichev 
/* Minimal protocol backing the dummy socket attached to test skbs, so
 * sk_alloc()/sk_free() work without a real network protocol.
 */
static struct proto bpf_dummy_proto = {
	.name   = "bpf_dummy",
	.owner  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};
569435b08ecSDaniel Borkmann 
5701cf1cae9SAlexei Starovoitov int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
5711cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
5721cf1cae9SAlexei Starovoitov {
5731cf1cae9SAlexei Starovoitov 	bool is_l2 = false, is_direct_pkt_access = false;
57421594c44SDmitry Yakunin 	struct net *net = current->nsproxy->net_ns;
57521594c44SDmitry Yakunin 	struct net_device *dev = net->loopback_dev;
5761cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
5771cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
578b0b9395dSStanislav Fomichev 	struct __sk_buff *ctx = NULL;
5791cf1cae9SAlexei Starovoitov 	u32 retval, duration;
5806e6fddc7SDaniel Borkmann 	int hh_len = ETH_HLEN;
5811cf1cae9SAlexei Starovoitov 	struct sk_buff *skb;
5822cb494a3SSong Liu 	struct sock *sk;
5831cf1cae9SAlexei Starovoitov 	void *data;
5841cf1cae9SAlexei Starovoitov 	int ret;
5851cf1cae9SAlexei Starovoitov 
5861b4d60ecSSong Liu 	if (kattr->test.flags || kattr->test.cpu)
5871b4d60ecSSong Liu 		return -EINVAL;
5881b4d60ecSSong Liu 
589586f8525SDavid Miller 	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
5901cf1cae9SAlexei Starovoitov 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
5911cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
5921cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
5931cf1cae9SAlexei Starovoitov 
594b0b9395dSStanislav Fomichev 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
595b0b9395dSStanislav Fomichev 	if (IS_ERR(ctx)) {
596b0b9395dSStanislav Fomichev 		kfree(data);
597b0b9395dSStanislav Fomichev 		return PTR_ERR(ctx);
598b0b9395dSStanislav Fomichev 	}
599b0b9395dSStanislav Fomichev 
6001cf1cae9SAlexei Starovoitov 	switch (prog->type) {
6011cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_CLS:
6021cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_ACT:
6031cf1cae9SAlexei Starovoitov 		is_l2 = true;
604df561f66SGustavo A. R. Silva 		fallthrough;
6051cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_IN:
6061cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_OUT:
6071cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_XMIT:
6081cf1cae9SAlexei Starovoitov 		is_direct_pkt_access = true;
6091cf1cae9SAlexei Starovoitov 		break;
6101cf1cae9SAlexei Starovoitov 	default:
6111cf1cae9SAlexei Starovoitov 		break;
6121cf1cae9SAlexei Starovoitov 	}
6131cf1cae9SAlexei Starovoitov 
614435b08ecSDaniel Borkmann 	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
6152cb494a3SSong Liu 	if (!sk) {
6161cf1cae9SAlexei Starovoitov 		kfree(data);
617b0b9395dSStanislav Fomichev 		kfree(ctx);
6181cf1cae9SAlexei Starovoitov 		return -ENOMEM;
6191cf1cae9SAlexei Starovoitov 	}
6202cb494a3SSong Liu 	sock_init_data(NULL, sk);
6212cb494a3SSong Liu 
6222cb494a3SSong Liu 	skb = build_skb(data, 0);
6232cb494a3SSong Liu 	if (!skb) {
6242cb494a3SSong Liu 		kfree(data);
625b0b9395dSStanislav Fomichev 		kfree(ctx);
626435b08ecSDaniel Borkmann 		sk_free(sk);
6272cb494a3SSong Liu 		return -ENOMEM;
6282cb494a3SSong Liu 	}
6292cb494a3SSong Liu 	skb->sk = sk;
6301cf1cae9SAlexei Starovoitov 
631586f8525SDavid Miller 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
6321cf1cae9SAlexei Starovoitov 	__skb_put(skb, size);
63321594c44SDmitry Yakunin 	if (ctx && ctx->ifindex > 1) {
63421594c44SDmitry Yakunin 		dev = dev_get_by_index(net, ctx->ifindex);
63521594c44SDmitry Yakunin 		if (!dev) {
63621594c44SDmitry Yakunin 			ret = -ENODEV;
63721594c44SDmitry Yakunin 			goto out;
63821594c44SDmitry Yakunin 		}
63921594c44SDmitry Yakunin 	}
64021594c44SDmitry Yakunin 	skb->protocol = eth_type_trans(skb, dev);
6411cf1cae9SAlexei Starovoitov 	skb_reset_network_header(skb);
6421cf1cae9SAlexei Starovoitov 
643fa5cb548SDmitry Yakunin 	switch (skb->protocol) {
644fa5cb548SDmitry Yakunin 	case htons(ETH_P_IP):
645fa5cb548SDmitry Yakunin 		sk->sk_family = AF_INET;
646fa5cb548SDmitry Yakunin 		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
647fa5cb548SDmitry Yakunin 			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
648fa5cb548SDmitry Yakunin 			sk->sk_daddr = ip_hdr(skb)->daddr;
649fa5cb548SDmitry Yakunin 		}
650fa5cb548SDmitry Yakunin 		break;
651fa5cb548SDmitry Yakunin #if IS_ENABLED(CONFIG_IPV6)
652fa5cb548SDmitry Yakunin 	case htons(ETH_P_IPV6):
653fa5cb548SDmitry Yakunin 		sk->sk_family = AF_INET6;
654fa5cb548SDmitry Yakunin 		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
655fa5cb548SDmitry Yakunin 			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
656fa5cb548SDmitry Yakunin 			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
657fa5cb548SDmitry Yakunin 		}
658fa5cb548SDmitry Yakunin 		break;
659fa5cb548SDmitry Yakunin #endif
660fa5cb548SDmitry Yakunin 	default:
661fa5cb548SDmitry Yakunin 		break;
662fa5cb548SDmitry Yakunin 	}
663fa5cb548SDmitry Yakunin 
6641cf1cae9SAlexei Starovoitov 	if (is_l2)
6656e6fddc7SDaniel Borkmann 		__skb_push(skb, hh_len);
6661cf1cae9SAlexei Starovoitov 	if (is_direct_pkt_access)
6676aaae2b6SDaniel Borkmann 		bpf_compute_data_pointers(skb);
668b0b9395dSStanislav Fomichev 	ret = convert___skb_to_skb(skb, ctx);
669b0b9395dSStanislav Fomichev 	if (ret)
670b0b9395dSStanislav Fomichev 		goto out;
671f23c4b39SBjörn Töpel 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
672b0b9395dSStanislav Fomichev 	if (ret)
673b0b9395dSStanislav Fomichev 		goto out;
6746e6fddc7SDaniel Borkmann 	if (!is_l2) {
6756e6fddc7SDaniel Borkmann 		if (skb_headroom(skb) < hh_len) {
6766e6fddc7SDaniel Borkmann 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
6776e6fddc7SDaniel Borkmann 
6786e6fddc7SDaniel Borkmann 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
679b0b9395dSStanislav Fomichev 				ret = -ENOMEM;
680b0b9395dSStanislav Fomichev 				goto out;
6816e6fddc7SDaniel Borkmann 			}
6826e6fddc7SDaniel Borkmann 		}
6836e6fddc7SDaniel Borkmann 		memset(__skb_push(skb, hh_len), 0, hh_len);
6846e6fddc7SDaniel Borkmann 	}
685b0b9395dSStanislav Fomichev 	convert_skb_to___skb(skb, ctx);
6866e6fddc7SDaniel Borkmann 
6871cf1cae9SAlexei Starovoitov 	size = skb->len;
6881cf1cae9SAlexei Starovoitov 	/* bpf program can never convert linear skb to non-linear */
6891cf1cae9SAlexei Starovoitov 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
6901cf1cae9SAlexei Starovoitov 		size = skb_headlen(skb);
69178e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
692b0b9395dSStanislav Fomichev 	if (!ret)
693b0b9395dSStanislav Fomichev 		ret = bpf_ctx_finish(kattr, uattr, ctx,
694b0b9395dSStanislav Fomichev 				     sizeof(struct __sk_buff));
695b0b9395dSStanislav Fomichev out:
69621594c44SDmitry Yakunin 	if (dev && dev != net->loopback_dev)
69721594c44SDmitry Yakunin 		dev_put(dev);
6981cf1cae9SAlexei Starovoitov 	kfree_skb(skb);
699435b08ecSDaniel Borkmann 	sk_free(sk);
700b0b9395dSStanislav Fomichev 	kfree(ctx);
7011cf1cae9SAlexei Starovoitov 	return ret;
7021cf1cae9SAlexei Starovoitov }
7031cf1cae9SAlexei Starovoitov 
/* Apply a user-supplied struct xdp_md context to an already-prepared
 * xdp_buff before a test run.
 *
 * @xdp_md: optional user context (may be NULL, in which case nothing is
 *          done and 0 is returned).
 * @xdp:    buffer whose ->rxq and ->data are adjusted in place.
 *
 * If ->ingress_ifindex is set, the device is looked up in the calling
 * task's network namespace and the requested RX queue is validated; on
 * success xdp->rxq points at that queue's registered xdp_rxq_info and a
 * device reference is intentionally kept (it is dropped later by
 * xdp_convert_buff_to_md()).
 *
 * Returns 0 on success, -EINVAL for unsupported/invalid field
 * combinations, -ENODEV if the ifindex does not resolve.
 */
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	/* Egress redirection cannot be simulated by the test runner. */
	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	/* A queue index is meaningless without a device to own it. */
	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		/* Takes a reference on the device; released either on the
		 * error path below or by xdp_convert_buff_to_md().
		 */
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	/* xdp_md->data is the metadata length; packet data starts after it. */
	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}
749ec94670fSZvi Effron 
750ec94670fSZvi Effron static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
751ec94670fSZvi Effron {
752ec94670fSZvi Effron 	if (!xdp_md)
753ec94670fSZvi Effron 		return;
754ec94670fSZvi Effron 
755ec94670fSZvi Effron 	xdp_md->data = xdp->data - xdp->data_meta;
756ec94670fSZvi Effron 	xdp_md->data_end = xdp->data_end - xdp->data_meta;
757ec94670fSZvi Effron 
758ec94670fSZvi Effron 	if (xdp_md->ingress_ifindex)
759ec94670fSZvi Effron 		dev_put(xdp->rxq->dev);
76047316f4aSZvi Effron }
76147316f4aSZvi Effron 
/* BPF_PROG_TEST_RUN handler for XDP programs.
 *
 * Builds a fake single-page receive buffer from the user-provided packet
 * data, optionally applies a user xdp_md context, runs the program
 * @repeat times via bpf_test_run(), then copies the (possibly modified)
 * packet, return value and timing back to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	struct xdp_md *ctx;
	u32 max_data_sz;
	void *data;
	int ret = -EINVAL;

	/* Devmap/cpumap-attached programs expect map-driven invocation and
	 * are not runnable through this interface.
	 */
	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	/* ctx is NULL when the user passed no context. */
	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	/* XDP have extra tailroom as (most) drivers use full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	/* Default RX queue: queue 0 of the caller's loopback device; may be
	 * overridden below by a user-supplied ingress_ifindex.
	 */
	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	/* For repeated runs, detach/re-attach so the dispatcher sees the
	 * program under test (undone at "out:").
	 */
	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	/* The program may have moved data/data_end; report the new size. */
	if (xdp.data_meta != data + headroom ||
	    xdp.data_end != xdp.data_meta + size)
		size = xdp.data_end - xdp.data_meta;

	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}
843b7a1848eSStanislav Fomichev 
844b2ca4e1cSStanislav Fomichev static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
845b2ca4e1cSStanislav Fomichev {
846b2ca4e1cSStanislav Fomichev 	/* make sure the fields we don't use are zeroed */
847b2ca4e1cSStanislav Fomichev 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
848b2ca4e1cSStanislav Fomichev 		return -EINVAL;
849b2ca4e1cSStanislav Fomichev 
850b2ca4e1cSStanislav Fomichev 	/* flags is allowed */
851b2ca4e1cSStanislav Fomichev 
852b590cb5fSStanislav Fomichev 	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
853b2ca4e1cSStanislav Fomichev 			   sizeof(struct bpf_flow_keys)))
854b2ca4e1cSStanislav Fomichev 		return -EINVAL;
855b2ca4e1cSStanislav Fomichev 
856b2ca4e1cSStanislav Fomichev 	return 0;
857b2ca4e1cSStanislav Fomichev }
858b2ca4e1cSStanislav Fomichev 
/* BPF_PROG_TEST_RUN handler for flow-dissector programs.
 *
 * Dissects the user-provided packet bytes (which must start with an
 * Ethernet header) @repeat times under the NO_PREEMPT test timer, then
 * copies the resulting bpf_flow_keys, the program's return value and the
 * average duration back to userspace.  An optional user context may
 * supply dissection flags; all its other fields must be zero.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	/* No CPU selection / extra flags supported for this prog type. */
	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	/* Need at least an Ethernet header to read h_proto below. */
	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	/* user_ctx is NULL when no context was passed in. */
	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	/* Dissection starts past the Ethernet header (ETH_HLEN offset). */
	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
9307c32e8f8SLorenz Bauer 
/* BPF_PROG_TEST_RUN handler for SK_LOOKUP programs.
 *
 * Takes only a bpf_sk_lookup context (no packet data is accepted),
 * translates it into a kernel bpf_sk_lookup_kern, and runs the program
 * @repeat times through a one-entry prog array, mirroring how sk_lookup
 * programs are dispatched.  On success the cookie of any socket the
 * program selected is written back into the user context.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	/* sk_lookup runs on a context only; packet data is rejected. */
	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	/* Unlike other handlers, a context is mandatory here. */
	if (!user_ctx)
		return -EINVAL;

	/* The sk field is output-only; it may not be pre-populated. */
	if (user_ctx->sk)
		goto out;

	/* Any bytes past local_port must be zero (reserved for output). */
	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	/* Ports are carried in u32 fields but must fit in 16 bits. */
	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	/* One-entry array so the run macro matches real dispatch. */
	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		/* Reset per-iteration output before each run. */
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		/* Reuseport selection needs a helper this path can't run. */
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}
103379a7f8bdSAlexei Starovoitov 
103479a7f8bdSAlexei Starovoitov int bpf_prog_test_run_syscall(struct bpf_prog *prog,
103579a7f8bdSAlexei Starovoitov 			      const union bpf_attr *kattr,
103679a7f8bdSAlexei Starovoitov 			      union bpf_attr __user *uattr)
103779a7f8bdSAlexei Starovoitov {
103879a7f8bdSAlexei Starovoitov 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
103979a7f8bdSAlexei Starovoitov 	__u32 ctx_size_in = kattr->test.ctx_size_in;
104079a7f8bdSAlexei Starovoitov 	void *ctx = NULL;
104179a7f8bdSAlexei Starovoitov 	u32 retval;
104279a7f8bdSAlexei Starovoitov 	int err = 0;
104379a7f8bdSAlexei Starovoitov 
104479a7f8bdSAlexei Starovoitov 	/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
104579a7f8bdSAlexei Starovoitov 	if (kattr->test.data_in || kattr->test.data_out ||
104679a7f8bdSAlexei Starovoitov 	    kattr->test.ctx_out || kattr->test.duration ||
104779a7f8bdSAlexei Starovoitov 	    kattr->test.repeat || kattr->test.flags)
104879a7f8bdSAlexei Starovoitov 		return -EINVAL;
104979a7f8bdSAlexei Starovoitov 
105079a7f8bdSAlexei Starovoitov 	if (ctx_size_in < prog->aux->max_ctx_offset ||
105179a7f8bdSAlexei Starovoitov 	    ctx_size_in > U16_MAX)
105279a7f8bdSAlexei Starovoitov 		return -EINVAL;
105379a7f8bdSAlexei Starovoitov 
105479a7f8bdSAlexei Starovoitov 	if (ctx_size_in) {
105579a7f8bdSAlexei Starovoitov 		ctx = kzalloc(ctx_size_in, GFP_USER);
105679a7f8bdSAlexei Starovoitov 		if (!ctx)
105779a7f8bdSAlexei Starovoitov 			return -ENOMEM;
105879a7f8bdSAlexei Starovoitov 		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
105979a7f8bdSAlexei Starovoitov 			err = -EFAULT;
106079a7f8bdSAlexei Starovoitov 			goto out;
106179a7f8bdSAlexei Starovoitov 		}
106279a7f8bdSAlexei Starovoitov 	}
106387b7b533SYonghong Song 
106487b7b533SYonghong Song 	rcu_read_lock_trace();
106579a7f8bdSAlexei Starovoitov 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
106687b7b533SYonghong Song 	rcu_read_unlock_trace();
106779a7f8bdSAlexei Starovoitov 
106879a7f8bdSAlexei Starovoitov 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
106979a7f8bdSAlexei Starovoitov 		err = -EFAULT;
107079a7f8bdSAlexei Starovoitov 		goto out;
107179a7f8bdSAlexei Starovoitov 	}
107279a7f8bdSAlexei Starovoitov 	if (ctx_size_in)
107379a7f8bdSAlexei Starovoitov 		if (copy_to_user(ctx_in, ctx, ctx_size_in))
107479a7f8bdSAlexei Starovoitov 			err = -EFAULT;
107579a7f8bdSAlexei Starovoitov out:
107679a7f8bdSAlexei Starovoitov 	kfree(ctx);
107779a7f8bdSAlexei Starovoitov 	return err;
107879a7f8bdSAlexei Starovoitov }
1079