// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

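/* Run the program against @ctx @repeat times (at least once) and report
 * the last return code via @retval and the average runtime in ns via
 * @time. @xdp selects bpf_prog_run_xdp() over BPF_PROG_RUN(). The loop
 * bails out with -EINTR on a pending signal and drops the RCU and
 * migration protection around cond_resched() when a reschedule is due.
 * Per-cgroup storage is allocated up front and freed after the runs.
 */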
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

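/* Copy the test results back to user space: the output data (clamped to
 * the user-supplied size hint, if any), its full size, the program's
 * return value and the measured duration. Returns -ENOSPC if the output
 * did not fit, -EFAULT if a copy to user space failed, 0 otherwise.
 */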
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp the copy if the user has provided a size hint, but copy the
	 * full buffer if not, to retain the old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Seven or more arguments
 * can be supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

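/* Allocate a zeroed buffer with the requested headroom and tailroom and
 * fill the data area from kattr->test.data_in. The input must cover at
 * least an Ethernet header and fit within a single page alongside the
 * head- and tailroom.
 */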
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

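/* Test runner for fentry/fexit and fmod_ret programs. Rather than running
 * the program on a packet, invoke the bpf_fentry_test*() and
 * bpf_modify_return_test() targets above so that any programs attached to
 * them fire. test.retval carries bpf_modify_return_test()'s return value
 * in the low 16 bits and a side-effect flag in the high 16 bits.
 */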
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

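/* Copy the user-supplied context (kattr->test.ctx_in) into a zeroed
 * kernel buffer of @max_size bytes. Returns NULL if no context was
 * requested, and rejects input with non-zero bytes beyond @max_size.
 */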
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

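/* Copy the context back to kattr->test.ctx_out, clamping to ctx_size_out
 * and returning -ENOSPC if the full context did not fit.
 */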
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

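/* Populate a real skb from the user-visible struct __sk_buff. Only the
 * fields marked "allowed" below may be set; everything else must be
 * zeroed so that fields added later cannot change behaviour silently.
 * wire_len and gso_segs are validated before being accepted.
 */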
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

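/* Reflect the final skb state back into the user-visible struct
 * __sk_buff so the caller can see what the program changed.
 */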
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

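/* BPF_PROG_TEST_RUN handler for skb-based program types: build an skb
 * around the user-supplied data, back it with a dummy socket and the
 * loopback device, apply the optional __sk_buff context, run the
 * program, and copy the resulting packet and context back out.
 */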
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

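/* BPF_PROG_TEST_RUN handler for XDP programs: lay the user-supplied data
 * out in an xdp_buff with XDP_PACKET_HEADROOM, attach it to RX queue 0
 * of the loopback device, run the program, and report the (possibly
 * resized) packet back to user space.
 */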
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

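/* Only the flags field of the user-supplied bpf_flow_keys may be set on
 * input; all other fields must be zeroed.
 */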
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

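/* BPF_PROG_TEST_RUN handler for flow-dissector programs: dissect the
 * user-supplied packet repeat times (at least once) under
 * rcu_read_lock() and preempt_disable(), timing the runs the same way
 * bpf_test_run() does, and return the resulting flow keys.
 */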
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}