// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

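/* Timekeeping for BPF_PROG_TEST_RUN repetitions. NO_PREEMPT disables
 * preemption for the timed section (used by the flow dissector and
 * sk_lookup runners below); NO_MIGRATE only pins the task to the
 * current CPU, which is enough for the skb and XDP runners.
 */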
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
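
/* Together these helpers time a run loop. The expected shape, as used
 * by bpf_test_run() and the runners below, is:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		*retval = bpf_prog_run(prog, ctx);
 *	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * On completion, duration holds the average runtime per iteration in
 * nanoseconds.
 */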

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
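
/* For reference, user space reaches these runners through the bpf(2)
 * syscall with cmd == BPF_PROG_TEST_RUN. A minimal sketch (error
 * handling omitted; prog_fd, pkt and out are caller-provided and only
 * illustrative):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.data_out = (__u64)(unsigned long)out;
 *	attr.test.data_size_out = out_len;
 *	attr.test.repeat = 1000;
 *
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On success, bpf_test_finish() has filled in attr.test.retval,
 * attr.test.duration (average ns per run) and attr.test.data_size_out.
 */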

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Tests with seven or more
 * arguments can be added in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)

bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
}
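
/* These kfuncs exist so test BPF programs can exercise kernel function
 * calls. A hypothetical BPF-C caller might look like this (the extern
 * declaration would normally come from vmlinux BTF; the section name
 * and program shape are illustrative only):
 *
 *	extern u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b,
 *					u32 c, u64 d) __ksym;
 *
 *	SEC("tc")
 *	int call_test(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *
 *		if (!sk)
 *			return 0;
 *		return bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
 *	}
 */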
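/* Allocate the test packet buffer and copy in the user payload. The
 * layout is [headroom | packet data | tailroom]; the copy lands at
 * data + headroom and, because the allocation is zeroed, any bytes
 * beyond data_size_in remain zero.
 */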
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

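	/* Pack the result: the low 16 bits carry the traced function's
	 * return value, the high 16 bits record whether its side effect
	 * was observed (a BPF_MODIFY_RETURN program returning non-zero
	 * suppresses the function body, leaving b == 2).
	 */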
	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}

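/* Copy in the optional user-supplied context, if any. The
 * bpf_check_uarg_tail_zero() call accepts a user struct larger than
 * the kernel's only when the trailing bytes are zero, so unknown
 * fields are rejected rather than silently ignored.
 */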
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to), i.e. the range is all zeroes.
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

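/* Only a whitelist of __sk_buff fields may be set by user space; the
 * gaps between writable fields must be all zero. Checking the gaps
 * with range_is_zero() makes any newly added field an error until it
 * is explicitly allowed below.
 */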
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

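/* Minimal proto used to allocate a throwaway socket so that the test
 * skb carries a valid skb->sk for programs that dereference it.
 */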
static struct proto bpf_dummy_proto = {
	.name   = "bpf_dummy",
	.owner  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

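/* The XDP runner backs the xdp_buff with a single page-sized buffer
 * and, unless the user context names an ingress device, ties it to
 * queue 0 of the loopback device. bpf_prog_change_xdp() temporarily
 * registers the program with the XDP dispatcher so the test executes
 * it the same way a real attachment would.
 */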
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	struct xdp_md *ctx;
	u32 max_data_sz;
	void *data;
	int ret = -EINVAL;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user-provided data before the metadata */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)))
			goto free_ctx;
		/* Metadata is allocated from the headroom */
		headroom -= ctx->data;
	}

	/* XDP has extra tailroom as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	if (xdp.data_meta != data + headroom ||
	    xdp.data_end != xdp.data_meta + size)
		size = xdp.data_end - xdp.data_meta;

	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	bpf_prog_change_xdp(prog, NULL);
free_data:
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

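/* sk_lookup programs always execute as a program array in the kernel,
 * so the runner wraps the single program under test in a one-element
 * array and dispatches it through BPF_PROG_SK_LOOKUP_RUN_ARRAY.
 */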
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!ctx)
			return -ENOMEM;
		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}
1068