xref: /openbmc/linux/net/bpf/test_run.c (revision d4c52c6a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/sock.h>
16 #include <net/tcp.h>
17 #include <net/net_namespace.h>
18 #include <net/page_pool/helpers.h>
19 #include <linux/error-injection.h>
20 #include <linux/smp.h>
21 #include <linux/sock_diag.h>
22 #include <linux/netfilter.h>
23 #include <net/netdev_rx_queue.h>
24 #include <net/xdp.h>
25 #include <net/netfilter/nf_bpf_link.h>
26 
27 #define CREATE_TRACE_POINTS
28 #include <trace/events/bpf_test_run.h>
29 
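/* Helper for the test-run loops below: measures the average time per
 * program invocation across 'repeat' runs. enter()/leave() hold
 * rcu_read_lock() and disable either preemption or migration depending
 * on ->mode; continue() accumulates the elapsed time, bails out early on
 * pending signals and reschedules via cond_resched() when needed.
 */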
30 struct bpf_test_timer {
31 	enum { NO_PREEMPT, NO_MIGRATE } mode;
32 	u32 i;
33 	u64 time_start, time_spent;
34 };
35 
36 static void bpf_test_timer_enter(struct bpf_test_timer *t)
37 	__acquires(rcu)
38 {
39 	rcu_read_lock();
40 	if (t->mode == NO_PREEMPT)
41 		preempt_disable();
42 	else
43 		migrate_disable();
44 
45 	t->time_start = ktime_get_ns();
46 }
47 
48 static void bpf_test_timer_leave(struct bpf_test_timer *t)
49 	__releases(rcu)
50 {
51 	t->time_start = 0;
52 
53 	if (t->mode == NO_PREEMPT)
54 		preempt_enable();
55 	else
56 		migrate_enable();
57 	rcu_read_unlock();
58 }
59 
60 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
61 				    u32 repeat, int *err, u32 *duration)
62 	__must_hold(rcu)
63 {
64 	t->i += iterations;
65 	if (t->i >= repeat) {
66 		/* We're done. */
67 		t->time_spent += ktime_get_ns() - t->time_start;
68 		do_div(t->time_spent, t->i);
69 		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
70 		*err = 0;
71 		goto reset;
72 	}
73 
74 	if (signal_pending(current)) {
75 		/* During iteration: we've been cancelled, abort. */
76 		*err = -EINTR;
77 		goto reset;
78 	}
79 
80 	if (need_resched()) {
81 		/* During iteration: we need to reschedule between runs. */
82 		t->time_spent += ktime_get_ns() - t->time_start;
83 		bpf_test_timer_leave(t);
84 		cond_resched();
85 		bpf_test_timer_enter(t);
86 	}
87 
88 	/* Do another round. */
89 	return true;
90 
91 reset:
92 	t->i = 0;
93 	return false;
94 }
95 
96 /* We put this struct at the head of each page with a context and frame
97  * initialised when the page is allocated, so we don't have to do this on each
98  * repetition of the test run.
99  */
100 struct xdp_page_head {
101 	struct xdp_buff orig_ctx;
102 	struct xdp_buff ctx;
103 	union {
104 		/* ::data_hard_start starts here */
105 		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
106 		DECLARE_FLEX_ARRAY(u8, data);
107 	};
108 };
109 
110 struct xdp_test_data {
111 	struct xdp_buff *orig_ctx;
112 	struct xdp_rxq_info rxq;
113 	struct net_device *dev;
114 	struct page_pool *pp;
115 	struct xdp_frame **frames;
116 	struct sk_buff **skbs;
117 	struct xdp_mem_info mem;
118 	u32 batch_size;
119 	u32 frame_cnt;
120 };
121 
122 /* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
123  * must be updated accordingly if this gets changed, otherwise BPF selftests
124  * will fail.
125  */
126 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
127 #define TEST_XDP_MAX_BATCH 256
128 
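/* page_pool init callback, run for each page when the pool allocates it:
 * copy the original packet (metadata included) into the page, initialise
 * the embedded xdp_buff and xdp_frame, and keep a pristine copy of the
 * context in ->orig_ctx so reset_ctx() can restore it between runs.
 */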
129 static void xdp_test_run_init_page(struct page *page, void *arg)
130 {
131 	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
132 	struct xdp_buff *new_ctx, *orig_ctx;
133 	u32 headroom = XDP_PACKET_HEADROOM;
134 	struct xdp_test_data *xdp = arg;
135 	size_t frm_len, meta_len;
136 	struct xdp_frame *frm;
137 	void *data;
138 
139 	orig_ctx = xdp->orig_ctx;
140 	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
141 	meta_len = orig_ctx->data - orig_ctx->data_meta;
142 	headroom -= meta_len;
143 
144 	new_ctx = &head->ctx;
145 	frm = head->frame;
146 	data = head->data;
147 	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
148 
149 	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
150 	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
151 	new_ctx->data = new_ctx->data_meta + meta_len;
152 
153 	xdp_update_frame_from_buff(new_ctx, frm);
154 	frm->mem = new_ctx->rxq->mem;
155 
156 	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
157 }
158 
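/* Set up everything needed to run XDP programs on live frames: per-batch
 * frame and skb arrays, a page_pool whose pages are pre-initialised by
 * xdp_test_run_init_page(), a registered page_pool memory model, and a
 * fake RXQ tied to the original device.
 */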
159 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
160 {
161 	struct page_pool *pp;
162 	int err = -ENOMEM;
163 	struct page_pool_params pp_params = {
164 		.order = 0,
165 		.flags = 0,
166 		.pool_size = xdp->batch_size,
167 		.nid = NUMA_NO_NODE,
168 		.init_callback = xdp_test_run_init_page,
169 		.init_arg = xdp,
170 	};
171 
172 	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
173 	if (!xdp->frames)
174 		return -ENOMEM;
175 
176 	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
177 	if (!xdp->skbs)
178 		goto err_skbs;
179 
180 	pp = page_pool_create(&pp_params);
181 	if (IS_ERR(pp)) {
182 		err = PTR_ERR(pp);
183 		goto err_pp;
184 	}
185 
186 	/* will copy 'mem.id' into pp->xdp_mem_id */
187 	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
188 	if (err)
189 		goto err_mmodel;
190 
191 	xdp->pp = pp;
192 
193 	/* We create a 'fake' RXQ referencing the original dev, but with an
194 	 * xdp_mem_info pointing to our page_pool
195 	 */
196 	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
197 	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
198 	xdp->rxq.mem.id = pp->xdp_mem_id;
199 	xdp->dev = orig_ctx->rxq->dev;
200 	xdp->orig_ctx = orig_ctx;
201 
202 	return 0;
203 
204 err_mmodel:
205 	page_pool_destroy(pp);
206 err_pp:
207 	kvfree(xdp->skbs);
208 err_skbs:
209 	kvfree(xdp->frames);
210 	return err;
211 }
212 
213 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
214 {
215 	xdp_unreg_mem_model(&xdp->mem);
216 	page_pool_destroy(xdp->pp);
217 	kvfree(xdp->frames);
218 	kvfree(xdp->skbs);
219 }
220 
221 static bool frame_was_changed(const struct xdp_page_head *head)
222 {
223 	/* xdp_scrub_frame() zeroes the data pointer; flags is the last field,
224 	 * i.e. it has the highest chance of being overwritten. If those two are
225 	 * untouched, it's most likely safe to skip the context reset.
226 	 */
227 	return head->frame->data != head->orig_ctx.data ||
228 	       head->frame->flags != head->orig_ctx.flags;
229 }
230 
231 static bool ctx_was_changed(struct xdp_page_head *head)
232 {
233 	return head->orig_ctx.data != head->ctx.data ||
234 		head->orig_ctx.data_meta != head->ctx.data_meta ||
235 		head->orig_ctx.data_end != head->ctx.data_end;
236 }
237 
238 static void reset_ctx(struct xdp_page_head *head)
239 {
240 	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
241 		return;
242 
243 	head->ctx.data = head->orig_ctx.data;
244 	head->ctx.data_meta = head->orig_ctx.data_meta;
245 	head->ctx.data_end = head->orig_ctx.data_end;
246 	xdp_update_frame_from_buff(&head->ctx, head->frame);
247 	head->frame->mem = head->orig_ctx.rxq->mem;
248 }
249 
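/* Pass XDP_PASS frames up the local stack: bulk-allocate skbs, convert
 * each xdp_frame into an skb and hand the whole list to
 * netif_receive_skb_list(). Frames that cannot be converted are returned
 * to their memory pool.
 */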
250 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
251 			   struct sk_buff **skbs,
252 			   struct net_device *dev)
253 {
254 	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
255 	int i, n;
256 	LIST_HEAD(list);
257 
258 	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
259 	if (unlikely(n == 0)) {
260 		for (i = 0; i < nframes; i++)
261 			xdp_return_frame(frames[i]);
262 		return -ENOMEM;
263 	}
264 
265 	for (i = 0; i < nframes; i++) {
266 		struct xdp_frame *xdpf = frames[i];
267 		struct sk_buff *skb = skbs[i];
268 
269 		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
270 		if (!skb) {
271 			xdp_return_frame(xdpf);
272 			continue;
273 		}
274 
275 		list_add_tail(&skb->list, &list);
276 	}
277 	netif_receive_skb_list(&list);
278 
279 	return 0;
280 }
281 
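/* Run the program on up to 'batch_size' pre-initialised pages and act on
 * the verdicts: XDP_TX is rewritten into a redirect back to the receiving
 * ifindex, XDP_REDIRECT goes through xdp_do_redirect_frame(), XDP_PASS
 * frames are queued for xdp_recv_frames(), everything else is dropped.
 */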
282 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
283 			      u32 repeat)
284 {
285 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
286 	int err = 0, act, ret, i, nframes = 0, batch_sz;
287 	struct xdp_frame **frames = xdp->frames;
288 	struct xdp_page_head *head;
289 	struct xdp_frame *frm;
290 	bool redirect = false;
291 	struct xdp_buff *ctx;
292 	struct page *page;
293 
294 	batch_sz = min_t(u32, repeat, xdp->batch_size);
295 
296 	local_bh_disable();
297 	xdp_set_return_frame_no_direct();
298 
299 	for (i = 0; i < batch_sz; i++) {
300 		page = page_pool_dev_alloc_pages(xdp->pp);
301 		if (!page) {
302 			err = -ENOMEM;
303 			goto out;
304 		}
305 
306 		head = phys_to_virt(page_to_phys(page));
307 		reset_ctx(head);
308 		ctx = &head->ctx;
309 		frm = head->frame;
310 		xdp->frame_cnt++;
311 
312 		act = bpf_prog_run_xdp(prog, ctx);
313 
314 		/* if program changed pkt bounds we need to update the xdp_frame */
315 		if (unlikely(ctx_was_changed(head))) {
316 			ret = xdp_update_frame_from_buff(ctx, frm);
317 			if (ret) {
318 				xdp_return_buff(ctx);
319 				continue;
320 			}
321 		}
322 
323 		switch (act) {
324 		case XDP_TX:
325 			/* we can't do a real XDP_TX since we're not in the
326 			 * driver, so turn it into a REDIRECT back to the same
327 			 * index
328 			 */
329 			ri->tgt_index = xdp->dev->ifindex;
330 			ri->map_id = INT_MAX;
331 			ri->map_type = BPF_MAP_TYPE_UNSPEC;
332 			fallthrough;
333 		case XDP_REDIRECT:
334 			redirect = true;
335 			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
336 			if (ret)
337 				xdp_return_buff(ctx);
338 			break;
339 		case XDP_PASS:
340 			frames[nframes++] = frm;
341 			break;
342 		default:
343 			bpf_warn_invalid_xdp_action(NULL, prog, act);
344 			fallthrough;
345 		case XDP_DROP:
346 			xdp_return_buff(ctx);
347 			break;
348 		}
349 	}
350 
351 out:
352 	if (redirect)
353 		xdp_do_flush();
354 	if (nframes) {
355 		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
356 		if (ret)
357 			err = ret;
358 	}
359 
360 	xdp_clear_return_frame_no_direct();
361 	local_bh_enable();
362 	return err;
363 }
364 
365 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
366 				 u32 repeat, u32 batch_size, u32 *time)
367 
368 {
369 	struct xdp_test_data xdp = { .batch_size = batch_size };
370 	struct bpf_test_timer t = { .mode = NO_MIGRATE };
371 	int ret;
372 
373 	if (!repeat)
374 		repeat = 1;
375 
376 	ret = xdp_test_run_setup(&xdp, ctx);
377 	if (ret)
378 		return ret;
379 
380 	bpf_test_timer_enter(&t);
381 	do {
382 		xdp.frame_cnt = 0;
383 		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
384 		if (unlikely(ret < 0))
385 			break;
386 	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
387 	bpf_test_timer_leave(&t);
388 
389 	xdp_test_run_teardown(&xdp);
390 	return ret;
391 }
392 
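/* Common test-run loop used by the skb, xdp and netfilter test runs:
 * allocate cgroup storage for the program, run it 'repeat' times with BHs
 * disabled under the test timer, and report the last return value plus the
 * average duration per run.
 */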
393 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
394 			u32 *retval, u32 *time, bool xdp)
395 {
396 	struct bpf_prog_array_item item = {.prog = prog};
397 	struct bpf_run_ctx *old_ctx;
398 	struct bpf_cg_run_ctx run_ctx;
399 	struct bpf_test_timer t = { NO_MIGRATE };
400 	enum bpf_cgroup_storage_type stype;
401 	int ret;
402 
403 	for_each_cgroup_storage_type(stype) {
404 		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
405 		if (IS_ERR(item.cgroup_storage[stype])) {
406 			item.cgroup_storage[stype] = NULL;
407 			for_each_cgroup_storage_type(stype)
408 				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
409 			return -ENOMEM;
410 		}
411 	}
412 
413 	if (!repeat)
414 		repeat = 1;
415 
416 	bpf_test_timer_enter(&t);
417 	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
418 	do {
419 		run_ctx.prog_item = &item;
420 		local_bh_disable();
421 		if (xdp)
422 			*retval = bpf_prog_run_xdp(prog, ctx);
423 		else
424 			*retval = bpf_prog_run(prog, ctx);
425 		local_bh_enable();
426 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
427 	bpf_reset_run_ctx(old_ctx);
428 	bpf_test_timer_leave(&t);
429 
430 	for_each_cgroup_storage_type(stype)
431 		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
432 
433 	return ret;
434 }
435 
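/* Copy the resulting packet data (linear part plus any frags), retval and
 * duration back to user space. The copy is clamped to the user-supplied
 * buffer size; -ENOSPC is returned when the output had to be truncated.
 */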
436 static int bpf_test_finish(const union bpf_attr *kattr,
437 			   union bpf_attr __user *uattr, const void *data,
438 			   struct skb_shared_info *sinfo, u32 size,
439 			   u32 retval, u32 duration)
440 {
441 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
442 	int err = -EFAULT;
443 	u32 copy_size = size;
444 
445 	/* Clamp the copy if the user has provided a size hint, but copy the
446 	 * full buffer if not, to retain old behaviour.
447 	 */
448 	if (kattr->test.data_size_out &&
449 	    copy_size > kattr->test.data_size_out) {
450 		copy_size = kattr->test.data_size_out;
451 		err = -ENOSPC;
452 	}
453 
454 	if (data_out) {
455 		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
456 
457 		if (len < 0) {
458 			err = -ENOSPC;
459 			goto out;
460 		}
461 
462 		if (copy_to_user(data_out, data, len))
463 			goto out;
464 
465 		if (sinfo) {
466 			int i, offset = len;
467 			u32 data_len;
468 
469 			for (i = 0; i < sinfo->nr_frags; i++) {
470 				skb_frag_t *frag = &sinfo->frags[i];
471 
472 				if (offset >= copy_size) {
473 					err = -ENOSPC;
474 					break;
475 				}
476 
477 				data_len = min_t(u32, copy_size - offset,
478 						 skb_frag_size(frag));
479 
480 				if (copy_to_user(data_out + offset,
481 						 skb_frag_address(frag),
482 						 data_len))
483 					goto out;
484 
485 				offset += data_len;
486 			}
487 		}
488 	}
489 
490 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
491 		goto out;
492 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
493 		goto out;
494 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
495 		goto out;
496 	if (err != -ENOSPC)
497 		err = 0;
498 out:
499 	trace_bpf_test_finish(&err);
500 	return err;
501 }
502 
503 /* Integer types of various sizes and pointer combinations cover a variety
504  * of architecture-dependent calling conventions. 7+ can be supported in the
505  * future.
506  */
507 __diag_push();
508 __diag_ignore_all("-Wmissing-prototypes",
509 		  "Global functions as their definitions will be in vmlinux BTF");
510 __bpf_kfunc int bpf_fentry_test1(int a)
511 {
512 	return a + 1;
513 }
514 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
515 
516 int noinline bpf_fentry_test2(int a, u64 b)
517 {
518 	return a + b;
519 }
520 
521 int noinline bpf_fentry_test3(char a, int b, u64 c)
522 {
523 	return a + b + c;
524 }
525 
526 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
527 {
528 	return (long)a + b + c + d;
529 }
530 
531 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
532 {
533 	return a + (long)b + c + d + e;
534 }
535 
536 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
537 {
538 	return a + (long)b + c + d + (long)e + f;
539 }
540 
541 struct bpf_fentry_test_t {
542 	struct bpf_fentry_test_t *a;
543 };
544 
545 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
546 {
547 	asm volatile ("": "+r"(arg));
548 	return (long)arg;
549 }
550 
551 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
552 {
553 	return (long)arg->a;
554 }
555 
556 __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
557 {
558 	return *a;
559 }
560 
561 void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
562 {
563 }
564 
565 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
566 {
567 	*b += 1;
568 	return a + *b;
569 }
570 
571 __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
572 					void *e, char f, int g)
573 {
574 	*b += 1;
575 	return a + *b + c + d + (long)e + f + g;
576 }
577 
578 int noinline bpf_fentry_shadow_test(int a)
579 {
580 	return a + 1;
581 }
582 
583 struct prog_test_member1 {
584 	int a;
585 };
586 
587 struct prog_test_member {
588 	struct prog_test_member1 m;
589 	int c;
590 };
591 
592 struct prog_test_ref_kfunc {
593 	int a;
594 	int b;
595 	struct prog_test_member memb;
596 	struct prog_test_ref_kfunc *next;
597 	refcount_t cnt;
598 };
599 
600 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
601 {
602 	refcount_dec(&p->cnt);
603 }
604 
605 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
606 {
607 }
608 
609 __diag_pop();
610 
611 BTF_SET8_START(bpf_test_modify_return_ids)
612 BTF_ID_FLAGS(func, bpf_modify_return_test)
613 BTF_ID_FLAGS(func, bpf_modify_return_test2)
614 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
615 BTF_SET8_END(bpf_test_modify_return_ids)
616 
617 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
618 	.owner = THIS_MODULE,
619 	.set   = &bpf_test_modify_return_ids,
620 };
621 
622 BTF_SET8_START(test_sk_check_kfunc_ids)
623 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
624 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
625 BTF_SET8_END(test_sk_check_kfunc_ids)
626 
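/* Allocate a zeroed buffer with the requested head- and tailroom and copy
 * the user-supplied packet data into it. Sizes below ETH_HLEN or larger
 * than what fits in a page alongside the head- and tailroom are rejected.
 */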
627 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
628 			   u32 size, u32 headroom, u32 tailroom)
629 {
630 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
631 	void *data;
632 
633 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
634 		return ERR_PTR(-EINVAL);
635 
636 	if (user_size > size)
637 		return ERR_PTR(-EMSGSIZE);
638 
639 	size = SKB_DATA_ALIGN(size);
640 	data = kzalloc(size + headroom + tailroom, GFP_USER);
641 	if (!data)
642 		return ERR_PTR(-ENOMEM);
643 
644 	if (copy_from_user(data + headroom, data_in, user_size)) {
645 		kfree(data);
646 		return ERR_PTR(-EFAULT);
647 	}
648 
649 	return data;
650 }
651 
652 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
653 			      const union bpf_attr *kattr,
654 			      union bpf_attr __user *uattr)
655 {
656 	struct bpf_fentry_test_t arg = {};
657 	u16 side_effect = 0, ret = 0;
658 	int b = 2, err = -EFAULT;
659 	u32 retval = 0;
660 
661 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
662 		return -EINVAL;
663 
664 	switch (prog->expected_attach_type) {
665 	case BPF_TRACE_FENTRY:
666 	case BPF_TRACE_FEXIT:
667 		if (bpf_fentry_test1(1) != 2 ||
668 		    bpf_fentry_test2(2, 3) != 5 ||
669 		    bpf_fentry_test3(4, 5, 6) != 15 ||
670 		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
671 		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
672 		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
673 		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
674 		    bpf_fentry_test8(&arg) != 0 ||
675 		    bpf_fentry_test9(&retval) != 0)
676 			goto out;
677 		break;
678 	case BPF_MODIFY_RETURN:
679 		ret = bpf_modify_return_test(1, &b);
680 		if (b != 2)
681 			side_effect++;
682 		b = 2;
683 		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
684 		if (b != 2)
685 			side_effect++;
686 		break;
687 	default:
688 		goto out;
689 	}
690 
691 	retval = ((u32)side_effect << 16) | ret;
692 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
693 		goto out;
694 
695 	err = 0;
696 out:
697 	trace_bpf_test_finish(&err);
698 	return err;
699 }
700 
701 struct bpf_raw_tp_test_run_info {
702 	struct bpf_prog *prog;
703 	void *ctx;
704 	u32 retval;
705 };
706 
707 static void
708 __bpf_prog_test_run_raw_tp(void *data)
709 {
710 	struct bpf_raw_tp_test_run_info *info = data;
711 	struct bpf_trace_run_ctx run_ctx = {};
712 	struct bpf_run_ctx *old_run_ctx;
713 
714 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
715 
716 	rcu_read_lock();
717 	info->retval = bpf_prog_run(info->prog, info->ctx);
718 	rcu_read_unlock();
719 
720 	bpf_reset_run_ctx(old_run_ctx);
721 }
722 
723 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
724 			     const union bpf_attr *kattr,
725 			     union bpf_attr __user *uattr)
726 {
727 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
728 	__u32 ctx_size_in = kattr->test.ctx_size_in;
729 	struct bpf_raw_tp_test_run_info info;
730 	int cpu = kattr->test.cpu, err = 0;
731 	int current_cpu;
732 
733 	/* doesn't support data_in/out, ctx_out, duration, or repeat */
734 	if (kattr->test.data_in || kattr->test.data_out ||
735 	    kattr->test.ctx_out || kattr->test.duration ||
736 	    kattr->test.repeat || kattr->test.batch_size)
737 		return -EINVAL;
738 
739 	if (ctx_size_in < prog->aux->max_ctx_offset ||
740 	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
741 		return -EINVAL;
742 
743 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
744 		return -EINVAL;
745 
746 	if (ctx_size_in) {
747 		info.ctx = memdup_user(ctx_in, ctx_size_in);
748 		if (IS_ERR(info.ctx))
749 			return PTR_ERR(info.ctx);
750 	} else {
751 		info.ctx = NULL;
752 	}
753 
754 	info.prog = prog;
755 
756 	current_cpu = get_cpu();
757 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
758 	    cpu == current_cpu) {
759 		__bpf_prog_test_run_raw_tp(&info);
760 	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
761 		/* smp_call_function_single() also checks cpu_online()
762 		 * after csd_lock(). However, since cpu is from user
763 		 * space, let's do an extra quick check to filter out
764 		 * invalid value before smp_call_function_single().
765 		 * invalid values before smp_call_function_single().
766 		err = -ENXIO;
767 	} else {
768 		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
769 					       &info, 1);
770 	}
771 	put_cpu();
772 
773 	if (!err &&
774 	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
775 		err = -EFAULT;
776 
777 	kfree(info.ctx);
778 	return err;
779 }
780 
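/* Copy the optional user-supplied context object into a zeroed kernel
 * buffer of at most max_size bytes. Trailing bytes beyond the kernel's
 * view of the struct must be zero (bpf_check_uarg_tail_zero()).
 */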
781 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
782 {
783 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
784 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
785 	u32 size = kattr->test.ctx_size_in;
786 	void *data;
787 	int err;
788 
789 	if (!data_in && !data_out)
790 		return NULL;
791 
792 	data = kzalloc(max_size, GFP_USER);
793 	if (!data)
794 		return ERR_PTR(-ENOMEM);
795 
796 	if (data_in) {
797 		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
798 		if (err) {
799 			kfree(data);
800 			return ERR_PTR(err);
801 		}
802 
803 		size = min_t(u32, max_size, size);
804 		if (copy_from_user(data, data_in, size)) {
805 			kfree(data);
806 			return ERR_PTR(-EFAULT);
807 		}
808 	}
809 	return data;
810 }
811 
812 static int bpf_ctx_finish(const union bpf_attr *kattr,
813 			  union bpf_attr __user *uattr, const void *data,
814 			  u32 size)
815 {
816 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
817 	int err = -EFAULT;
818 	u32 copy_size = size;
819 
820 	if (!data || !data_out)
821 		return 0;
822 
823 	if (copy_size > kattr->test.ctx_size_out) {
824 		copy_size = kattr->test.ctx_size_out;
825 		err = -ENOSPC;
826 	}
827 
828 	if (copy_to_user(data_out, data, copy_size))
829 		goto out;
830 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
831 		goto out;
832 	if (err != -ENOSPC)
833 		err = 0;
834 out:
835 	return err;
836 }
837 
838 /**
839  * range_is_zero - test whether buffer is initialized
840  * @buf: buffer to check
841  * @from: check from this position
842  * @to: check up until (excluding) this position
843  *
844  * This function returns true if there are no non-zero bytes
845  * in the buf in the range [from,to).
846  */
847 static inline bool range_is_zero(void *buf, size_t from, size_t to)
848 {
849 	return !memchr_inv((u8 *)buf + from, 0, to - from);
850 }
851 
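/* Apply the user-supplied __sk_buff to the real skb. Only the fields
 * called out in the comments below may be non-zero; everything in between
 * must be zero, which is what the range_is_zero() checks enforce.
 */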
852 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
853 {
854 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
855 
856 	if (!__skb)
857 		return 0;
858 
859 	/* make sure the fields we don't use are zeroed */
860 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
861 		return -EINVAL;
862 
863 	/* mark is allowed */
864 
865 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
866 			   offsetof(struct __sk_buff, priority)))
867 		return -EINVAL;
868 
869 	/* priority is allowed */
870 	/* ingress_ifindex is allowed */
871 	/* ifindex is allowed */
872 
873 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
874 			   offsetof(struct __sk_buff, cb)))
875 		return -EINVAL;
876 
877 	/* cb is allowed */
878 
879 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
880 			   offsetof(struct __sk_buff, tstamp)))
881 		return -EINVAL;
882 
883 	/* tstamp is allowed */
884 	/* wire_len is allowed */
885 	/* gso_segs is allowed */
886 
887 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
888 			   offsetof(struct __sk_buff, gso_size)))
889 		return -EINVAL;
890 
891 	/* gso_size is allowed */
892 
893 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
894 			   offsetof(struct __sk_buff, hwtstamp)))
895 		return -EINVAL;
896 
897 	/* hwtstamp is allowed */
898 
899 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
900 			   sizeof(struct __sk_buff)))
901 		return -EINVAL;
902 
903 	skb->mark = __skb->mark;
904 	skb->priority = __skb->priority;
905 	skb->skb_iif = __skb->ingress_ifindex;
906 	skb->tstamp = __skb->tstamp;
907 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
908 
909 	if (__skb->wire_len == 0) {
910 		cb->pkt_len = skb->len;
911 	} else {
912 		if (__skb->wire_len < skb->len ||
913 		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
914 			return -EINVAL;
915 		cb->pkt_len = __skb->wire_len;
916 	}
917 
918 	if (__skb->gso_segs > GSO_MAX_SEGS)
919 		return -EINVAL;
920 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
921 	skb_shinfo(skb)->gso_size = __skb->gso_size;
922 	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
923 
924 	return 0;
925 }
926 
927 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
928 {
929 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
930 
931 	if (!__skb)
932 		return;
933 
934 	__skb->mark = skb->mark;
935 	__skb->priority = skb->priority;
936 	__skb->ingress_ifindex = skb->skb_iif;
937 	__skb->ifindex = skb->dev->ifindex;
938 	__skb->tstamp = skb->tstamp;
939 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
940 	__skb->wire_len = cb->pkt_len;
941 	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
942 	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
943 }
944 
945 static struct proto bpf_dummy_proto = {
946 	.name   = "bpf_dummy",
947 	.owner  = THIS_MODULE,
948 	.obj_size = sizeof(struct sock),
949 };
950 
951 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
952 			  union bpf_attr __user *uattr)
953 {
954 	bool is_l2 = false, is_direct_pkt_access = false;
955 	struct net *net = current->nsproxy->net_ns;
956 	struct net_device *dev = net->loopback_dev;
957 	u32 size = kattr->test.data_size_in;
958 	u32 repeat = kattr->test.repeat;
959 	struct __sk_buff *ctx = NULL;
960 	u32 retval, duration;
961 	int hh_len = ETH_HLEN;
962 	struct sk_buff *skb;
963 	struct sock *sk;
964 	void *data;
965 	int ret;
966 
967 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
968 		return -EINVAL;
969 
970 	data = bpf_test_init(kattr, kattr->test.data_size_in,
971 			     size, NET_SKB_PAD + NET_IP_ALIGN,
972 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
973 	if (IS_ERR(data))
974 		return PTR_ERR(data);
975 
976 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
977 	if (IS_ERR(ctx)) {
978 		kfree(data);
979 		return PTR_ERR(ctx);
980 	}
981 
982 	switch (prog->type) {
983 	case BPF_PROG_TYPE_SCHED_CLS:
984 	case BPF_PROG_TYPE_SCHED_ACT:
985 		is_l2 = true;
986 		fallthrough;
987 	case BPF_PROG_TYPE_LWT_IN:
988 	case BPF_PROG_TYPE_LWT_OUT:
989 	case BPF_PROG_TYPE_LWT_XMIT:
990 		is_direct_pkt_access = true;
991 		break;
992 	default:
993 		break;
994 	}
995 
996 	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
997 	if (!sk) {
998 		kfree(data);
999 		kfree(ctx);
1000 		return -ENOMEM;
1001 	}
1002 	sock_init_data(NULL, sk);
1003 
1004 	skb = slab_build_skb(data);
1005 	if (!skb) {
1006 		kfree(data);
1007 		kfree(ctx);
1008 		sk_free(sk);
1009 		return -ENOMEM;
1010 	}
1011 	skb->sk = sk;
1012 
1013 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1014 	__skb_put(skb, size);
1015 	if (ctx && ctx->ifindex > 1) {
1016 		dev = dev_get_by_index(net, ctx->ifindex);
1017 		if (!dev) {
1018 			ret = -ENODEV;
1019 			goto out;
1020 		}
1021 	}
1022 	skb->protocol = eth_type_trans(skb, dev);
1023 	skb_reset_network_header(skb);
1024 
1025 	switch (skb->protocol) {
1026 	case htons(ETH_P_IP):
1027 		sk->sk_family = AF_INET;
1028 		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1029 			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1030 			sk->sk_daddr = ip_hdr(skb)->daddr;
1031 		}
1032 		break;
1033 #if IS_ENABLED(CONFIG_IPV6)
1034 	case htons(ETH_P_IPV6):
1035 		sk->sk_family = AF_INET6;
1036 		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1037 			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1038 			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1039 		}
1040 		break;
1041 #endif
1042 	default:
1043 		break;
1044 	}
1045 
1046 	if (is_l2)
1047 		__skb_push(skb, hh_len);
1048 	if (is_direct_pkt_access)
1049 		bpf_compute_data_pointers(skb);
1050 	ret = convert___skb_to_skb(skb, ctx);
1051 	if (ret)
1052 		goto out;
1053 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1054 	if (ret)
1055 		goto out;
1056 	if (!is_l2) {
1057 		if (skb_headroom(skb) < hh_len) {
1058 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1059 
1060 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1061 				ret = -ENOMEM;
1062 				goto out;
1063 			}
1064 		}
1065 		memset(__skb_push(skb, hh_len), 0, hh_len);
1066 	}
1067 	convert_skb_to___skb(skb, ctx);
1068 
1069 	size = skb->len;
1070 	/* bpf program can never convert linear skb to non-linear */
1071 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1072 		size = skb_headlen(skb);
1073 	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1074 			      duration);
1075 	if (!ret)
1076 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1077 				     sizeof(struct __sk_buff));
1078 out:
1079 	if (dev && dev != net->loopback_dev)
1080 		dev_put(dev);
1081 	kfree_skb(skb);
1082 	sk_free(sk);
1083 	kfree(ctx);
1084 	return ret;
1085 }
1086 
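/* Apply the user-supplied xdp_md to the kernel xdp_buff. If an ingress
 * ifindex is given, look up the device and rx queue in the current netns
 * and point xdp->rxq at it; the device reference taken here is dropped
 * again in xdp_convert_buff_to_md().
 */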
1087 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1088 {
1089 	unsigned int ingress_ifindex, rx_queue_index;
1090 	struct netdev_rx_queue *rxqueue;
1091 	struct net_device *device;
1092 
1093 	if (!xdp_md)
1094 		return 0;
1095 
1096 	if (xdp_md->egress_ifindex != 0)
1097 		return -EINVAL;
1098 
1099 	ingress_ifindex = xdp_md->ingress_ifindex;
1100 	rx_queue_index = xdp_md->rx_queue_index;
1101 
1102 	if (!ingress_ifindex && rx_queue_index)
1103 		return -EINVAL;
1104 
1105 	if (ingress_ifindex) {
1106 		device = dev_get_by_index(current->nsproxy->net_ns,
1107 					  ingress_ifindex);
1108 		if (!device)
1109 			return -ENODEV;
1110 
1111 		if (rx_queue_index >= device->real_num_rx_queues)
1112 			goto free_dev;
1113 
1114 		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1115 
1116 		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1117 			goto free_dev;
1118 
1119 		xdp->rxq = &rxqueue->xdp_rxq;
1120 		/* The device is now tracked in the xdp->rxq for later
1121 		 * dev_put()
1122 		 */
1123 	}
1124 
1125 	xdp->data = xdp->data_meta + xdp_md->data;
1126 	return 0;
1127 
1128 free_dev:
1129 	dev_put(device);
1130 	return -EINVAL;
1131 }
1132 
1133 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1134 {
1135 	if (!xdp_md)
1136 		return;
1137 
1138 	xdp_md->data = xdp->data - xdp->data_meta;
1139 	xdp_md->data_end = xdp->data_end - xdp->data_meta;
1140 
1141 	if (xdp_md->ingress_ifindex)
1142 		dev_put(xdp->rxq->dev);
1143 }
1144 
1145 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1146 			  union bpf_attr __user *uattr)
1147 {
1148 	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1149 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1150 	u32 batch_size = kattr->test.batch_size;
1151 	u32 retval = 0, duration, max_data_sz;
1152 	u32 size = kattr->test.data_size_in;
1153 	u32 headroom = XDP_PACKET_HEADROOM;
1154 	u32 repeat = kattr->test.repeat;
1155 	struct netdev_rx_queue *rxqueue;
1156 	struct skb_shared_info *sinfo;
1157 	struct xdp_buff xdp = {};
1158 	int i, ret = -EINVAL;
1159 	struct xdp_md *ctx;
1160 	void *data;
1161 
1162 	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1163 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
1164 		return -EINVAL;
1165 
1166 	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1167 		return -EINVAL;
1168 
1169 	if (bpf_prog_is_dev_bound(prog->aux))
1170 		return -EINVAL;
1171 
1172 	if (do_live) {
1173 		if (!batch_size)
1174 			batch_size = NAPI_POLL_WEIGHT;
1175 		else if (batch_size > TEST_XDP_MAX_BATCH)
1176 			return -E2BIG;
1177 
1178 		headroom += sizeof(struct xdp_page_head);
1179 	} else if (batch_size) {
1180 		return -EINVAL;
1181 	}
1182 
1183 	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1184 	if (IS_ERR(ctx))
1185 		return PTR_ERR(ctx);
1186 
1187 	if (ctx) {
1188 		/* There can't be user-provided data before the metadata */
1189 		if (ctx->data_meta || ctx->data_end != size ||
1190 		    ctx->data > ctx->data_end ||
1191 		    unlikely(xdp_metalen_invalid(ctx->data)) ||
1192 		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1193 			goto free_ctx;
1194 		/* Meta data is allocated from the headroom */
1195 		headroom -= ctx->data;
1196 	}
1197 
1198 	max_data_sz = 4096 - headroom - tailroom;
1199 	if (size > max_data_sz) {
1200 		/* disallow live data mode for jumbo frames */
1201 		if (do_live)
1202 			goto free_ctx;
1203 		size = max_data_sz;
1204 	}
1205 
1206 	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1207 	if (IS_ERR(data)) {
1208 		ret = PTR_ERR(data);
1209 		goto free_ctx;
1210 	}
1211 
1212 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1213 	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1214 	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1215 	xdp_prepare_buff(&xdp, data, headroom, size, true);
1216 	sinfo = xdp_get_shared_info_from_buff(&xdp);
1217 
1218 	ret = xdp_convert_md_to_buff(ctx, &xdp);
1219 	if (ret)
1220 		goto free_data;
1221 
1222 	if (unlikely(kattr->test.data_size_in > size)) {
1223 		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1224 
1225 		while (size < kattr->test.data_size_in) {
1226 			struct page *page;
1227 			skb_frag_t *frag;
1228 			u32 data_len;
1229 
1230 			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1231 				ret = -ENOMEM;
1232 				goto out;
1233 			}
1234 
1235 			page = alloc_page(GFP_KERNEL);
1236 			if (!page) {
1237 				ret = -ENOMEM;
1238 				goto out;
1239 			}
1240 
1241 			frag = &sinfo->frags[sinfo->nr_frags++];
1242 
1243 			data_len = min_t(u32, kattr->test.data_size_in - size,
1244 					 PAGE_SIZE);
1245 			skb_frag_fill_page_desc(frag, page, 0, data_len);
1246 
1247 			if (copy_from_user(page_address(page), data_in + size,
1248 					   data_len)) {
1249 				ret = -EFAULT;
1250 				goto out;
1251 			}
1252 			sinfo->xdp_frags_size += data_len;
1253 			size += data_len;
1254 		}
1255 		xdp_buff_set_frags_flag(&xdp);
1256 	}
1257 
1258 	if (repeat > 1)
1259 		bpf_prog_change_xdp(NULL, prog);
1260 
1261 	if (do_live)
1262 		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1263 	else
1264 		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1265 	/* We convert the xdp_buff back to an xdp_md before checking the return
1266 	 * code so the reference count of any held netdevice will be decremented
1267 	 * even if the test run failed.
1268 	 */
1269 	xdp_convert_buff_to_md(&xdp, ctx);
1270 	if (ret)
1271 		goto out;
1272 
1273 	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1274 	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1275 			      retval, duration);
1276 	if (!ret)
1277 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1278 				     sizeof(struct xdp_md));
1279 
1280 out:
1281 	if (repeat > 1)
1282 		bpf_prog_change_xdp(prog, NULL);
1283 free_data:
1284 	for (i = 0; i < sinfo->nr_frags; i++)
1285 		__free_page(skb_frag_page(&sinfo->frags[i]));
1286 	kfree(data);
1287 free_ctx:
1288 	kfree(ctx);
1289 	return ret;
1290 }
1291 
1292 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1293 {
1294 	/* make sure the fields we don't use are zeroed */
1295 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1296 		return -EINVAL;
1297 
1298 	/* flags is allowed */
1299 
1300 	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1301 			   sizeof(struct bpf_flow_keys)))
1302 		return -EINVAL;
1303 
1304 	return 0;
1305 }
1306 
1307 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1308 				     const union bpf_attr *kattr,
1309 				     union bpf_attr __user *uattr)
1310 {
1311 	struct bpf_test_timer t = { NO_PREEMPT };
1312 	u32 size = kattr->test.data_size_in;
1313 	struct bpf_flow_dissector ctx = {};
1314 	u32 repeat = kattr->test.repeat;
1315 	struct bpf_flow_keys *user_ctx;
1316 	struct bpf_flow_keys flow_keys;
1317 	const struct ethhdr *eth;
1318 	unsigned int flags = 0;
1319 	u32 retval, duration;
1320 	void *data;
1321 	int ret;
1322 
1323 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1324 		return -EINVAL;
1325 
1326 	if (size < ETH_HLEN)
1327 		return -EINVAL;
1328 
1329 	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1330 	if (IS_ERR(data))
1331 		return PTR_ERR(data);
1332 
1333 	eth = (struct ethhdr *)data;
1334 
1335 	if (!repeat)
1336 		repeat = 1;
1337 
1338 	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1339 	if (IS_ERR(user_ctx)) {
1340 		kfree(data);
1341 		return PTR_ERR(user_ctx);
1342 	}
1343 	if (user_ctx) {
1344 		ret = verify_user_bpf_flow_keys(user_ctx);
1345 		if (ret)
1346 			goto out;
1347 		flags = user_ctx->flags;
1348 	}
1349 
1350 	ctx.flow_keys = &flow_keys;
1351 	ctx.data = data;
1352 	ctx.data_end = (__u8 *)data + size;
1353 
1354 	bpf_test_timer_enter(&t);
1355 	do {
1356 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1357 					  size, flags);
1358 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1359 	bpf_test_timer_leave(&t);
1360 
1361 	if (ret < 0)
1362 		goto out;
1363 
1364 	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1365 			      sizeof(flow_keys), retval, duration);
1366 	if (!ret)
1367 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1368 				     sizeof(struct bpf_flow_keys));
1369 
1370 out:
1371 	kfree(user_ctx);
1372 	kfree(data);
1373 	return ret;
1374 }
1375 
1376 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1377 				union bpf_attr __user *uattr)
1378 {
1379 	struct bpf_test_timer t = { NO_PREEMPT };
1380 	struct bpf_prog_array *progs = NULL;
1381 	struct bpf_sk_lookup_kern ctx = {};
1382 	u32 repeat = kattr->test.repeat;
1383 	struct bpf_sk_lookup *user_ctx;
1384 	u32 retval, duration;
1385 	int ret = -EINVAL;
1386 
1387 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1388 		return -EINVAL;
1389 
1390 	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1391 	    kattr->test.data_size_out)
1392 		return -EINVAL;
1393 
1394 	if (!repeat)
1395 		repeat = 1;
1396 
1397 	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1398 	if (IS_ERR(user_ctx))
1399 		return PTR_ERR(user_ctx);
1400 
1401 	if (!user_ctx)
1402 		return -EINVAL;
1403 
1404 	if (user_ctx->sk)
1405 		goto out;
1406 
1407 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1408 		goto out;
1409 
1410 	if (user_ctx->local_port > U16_MAX) {
1411 		ret = -ERANGE;
1412 		goto out;
1413 	}
1414 
1415 	ctx.family = (u16)user_ctx->family;
1416 	ctx.protocol = (u16)user_ctx->protocol;
1417 	ctx.dport = (u16)user_ctx->local_port;
1418 	ctx.sport = user_ctx->remote_port;
1419 
1420 	switch (ctx.family) {
1421 	case AF_INET:
1422 		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1423 		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1424 		break;
1425 
1426 #if IS_ENABLED(CONFIG_IPV6)
1427 	case AF_INET6:
1428 		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1429 		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1430 		break;
1431 #endif
1432 
1433 	default:
1434 		ret = -EAFNOSUPPORT;
1435 		goto out;
1436 	}
1437 
1438 	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1439 	if (!progs) {
1440 		ret = -ENOMEM;
1441 		goto out;
1442 	}
1443 
1444 	progs->items[0].prog = prog;
1445 
1446 	bpf_test_timer_enter(&t);
1447 	do {
1448 		ctx.selected_sk = NULL;
1449 		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1450 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1451 	bpf_test_timer_leave(&t);
1452 
1453 	if (ret < 0)
1454 		goto out;
1455 
1456 	user_ctx->cookie = 0;
1457 	if (ctx.selected_sk) {
1458 		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1459 			ret = -EOPNOTSUPP;
1460 			goto out;
1461 		}
1462 
1463 		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1464 	}
1465 
1466 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1467 	if (!ret)
1468 		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1469 
1470 out:
1471 	bpf_prog_array_free(progs);
1472 	kfree(user_ctx);
1473 	return ret;
1474 }
1475 
1476 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1477 			      const union bpf_attr *kattr,
1478 			      union bpf_attr __user *uattr)
1479 {
1480 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1481 	__u32 ctx_size_in = kattr->test.ctx_size_in;
1482 	void *ctx = NULL;
1483 	u32 retval;
1484 	int err = 0;
1485 
1486 	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
1487 	if (kattr->test.data_in || kattr->test.data_out ||
1488 	    kattr->test.ctx_out || kattr->test.duration ||
1489 	    kattr->test.repeat || kattr->test.flags ||
1490 	    kattr->test.batch_size)
1491 		return -EINVAL;
1492 
1493 	if (ctx_size_in < prog->aux->max_ctx_offset ||
1494 	    ctx_size_in > U16_MAX)
1495 		return -EINVAL;
1496 
1497 	if (ctx_size_in) {
1498 		ctx = memdup_user(ctx_in, ctx_size_in);
1499 		if (IS_ERR(ctx))
1500 			return PTR_ERR(ctx);
1501 	}
1502 
1503 	rcu_read_lock_trace();
1504 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1505 	rcu_read_unlock_trace();
1506 
1507 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1508 		err = -EFAULT;
1509 		goto out;
1510 	}
1511 	if (ctx_size_in)
1512 		if (copy_to_user(ctx_in, ctx, ctx_size_in))
1513 			err = -EFAULT;
1514 out:
1515 	kfree(ctx);
1516 	return err;
1517 }
1518 
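/* Validate the user-supplied nf_hook_state: only pf and hook may be set,
 * and only the IPv4/IPv6 families are accepted. The in/out device slots
 * are populated with the test device depending on the hook.
 */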
1519 static int verify_and_copy_hook_state(struct nf_hook_state *state,
1520 				      const struct nf_hook_state *user,
1521 				      struct net_device *dev)
1522 {
1523 	if (user->in || user->out)
1524 		return -EINVAL;
1525 
1526 	if (user->net || user->sk || user->okfn)
1527 		return -EINVAL;
1528 
1529 	switch (user->pf) {
1530 	case NFPROTO_IPV4:
1531 	case NFPROTO_IPV6:
1532 		switch (state->hook) {
1533 		case NF_INET_PRE_ROUTING:
1534 			state->in = dev;
1535 			break;
1536 		case NF_INET_LOCAL_IN:
1537 			state->in = dev;
1538 			break;
1539 		case NF_INET_FORWARD:
1540 			state->in = dev;
1541 			state->out = dev;
1542 			break;
1543 		case NF_INET_LOCAL_OUT:
1544 			state->out = dev;
1545 			break;
1546 		case NF_INET_POST_ROUTING:
1547 			state->out = dev;
1548 			break;
1549 		}
1550 
1551 		break;
1552 	default:
1553 		return -EINVAL;
1554 	}
1555 
1556 	state->pf = user->pf;
1557 	state->hook = user->hook;
1558 
1559 	return 0;
1560 }
1561 
1562 static __be16 nfproto_eth(int nfproto)
1563 {
1564 	switch (nfproto) {
1565 	case NFPROTO_IPV4:
1566 		return htons(ETH_P_IP);
1567 	case NFPROTO_IPV6:
1568 		break;
1569 	}
1570 
1571 	return htons(ETH_P_IPV6);
1572 }
1573 
1574 int bpf_prog_test_run_nf(struct bpf_prog *prog,
1575 			 const union bpf_attr *kattr,
1576 			 union bpf_attr __user *uattr)
1577 {
1578 	struct net *net = current->nsproxy->net_ns;
1579 	struct net_device *dev = net->loopback_dev;
1580 	struct nf_hook_state *user_ctx, hook_state = {
1581 		.pf = NFPROTO_IPV4,
1582 		.hook = NF_INET_LOCAL_OUT,
1583 	};
1584 	u32 size = kattr->test.data_size_in;
1585 	u32 repeat = kattr->test.repeat;
1586 	struct bpf_nf_ctx ctx = {
1587 		.state = &hook_state,
1588 	};
1589 	struct sk_buff *skb = NULL;
1590 	u32 retval, duration;
1591 	void *data;
1592 	int ret;
1593 
1594 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1595 		return -EINVAL;
1596 
1597 	if (size < sizeof(struct iphdr))
1598 		return -EINVAL;
1599 
1600 	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
1601 			     NET_SKB_PAD + NET_IP_ALIGN,
1602 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1603 	if (IS_ERR(data))
1604 		return PTR_ERR(data);
1605 
1606 	if (!repeat)
1607 		repeat = 1;
1608 
1609 	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
1610 	if (IS_ERR(user_ctx)) {
1611 		kfree(data);
1612 		return PTR_ERR(user_ctx);
1613 	}
1614 
1615 	if (user_ctx) {
1616 		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
1617 		if (ret)
1618 			goto out;
1619 	}
1620 
1621 	skb = slab_build_skb(data);
1622 	if (!skb) {
1623 		ret = -ENOMEM;
1624 		goto out;
1625 	}
1626 
1627 	data = NULL; /* data released via kfree_skb */
1628 
1629 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1630 	__skb_put(skb, size);
1631 
1632 	ret = -EINVAL;
1633 
1634 	if (hook_state.hook != NF_INET_LOCAL_OUT) {
1635 		if (size < ETH_HLEN + sizeof(struct iphdr))
1636 			goto out;
1637 
1638 		skb->protocol = eth_type_trans(skb, dev);
1639 		switch (skb->protocol) {
1640 		case htons(ETH_P_IP):
1641 			if (hook_state.pf == NFPROTO_IPV4)
1642 				break;
1643 			goto out;
1644 		case htons(ETH_P_IPV6):
1645 			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
1646 				goto out;
1647 			if (hook_state.pf == NFPROTO_IPV6)
1648 				break;
1649 			goto out;
1650 		default:
1651 			ret = -EPROTO;
1652 			goto out;
1653 		}
1654 
1655 		skb_reset_network_header(skb);
1656 	} else {
1657 		skb->protocol = nfproto_eth(hook_state.pf);
1658 	}
1659 
1660 	ctx.skb = skb;
1661 
1662 	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
1663 	if (ret)
1664 		goto out;
1665 
1666 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1667 
1668 out:
1669 	kfree(user_ctx);
1670 	kfree_skb(skb);
1671 	kfree(data);
1672 	return ret;
1673 }
1674 
1675 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1676 	.owner = THIS_MODULE,
1677 	.set   = &test_sk_check_kfunc_ids,
1678 };
1679 
1680 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1681 BTF_ID(struct, prog_test_ref_kfunc)
1682 BTF_ID(func, bpf_kfunc_call_test_release)
1683 BTF_ID(struct, prog_test_member)
1684 BTF_ID(func, bpf_kfunc_call_memb_release)
1685 
1686 static int __init bpf_prog_test_run_init(void)
1687 {
1688 	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1689 		{
1690 		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1691 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1692 		},
1693 		{
1694 		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
1695 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1696 		},
1697 	};
1698 	int ret;
1699 
1700 	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1701 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1702 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1703 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1704 	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1705 						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1706 						  THIS_MODULE);
1707 }
1708 late_initcall(bpf_prog_test_run_init);
1709