xref: /openbmc/linux/net/bpf/test_run.c (revision 31527da5)
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

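/* Run @prog once on @ctx with preemption disabled and under rcu_read_lock(),
 * after making @storage visible to the program via bpf_cgroup_storage_set().
 * Returns the program's return value.
 */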
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
					    struct bpf_cgroup_storage *storage)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

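/* Run @prog on @ctx @repeat times (at least once) and report the average run
 * time in nanoseconds through @time, clamped to U32_MAX.  When a reschedule
 * is due, the loop bails out early if a signal is pending, and the timer is
 * paused around cond_resched() so only program run time is accounted.
 */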
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage = NULL;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	storage = bpf_cgroup_storage_alloc(prog);
	if (IS_ERR(storage))
		return PTR_ERR(storage);

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	bpf_cgroup_storage_free(storage);

	return ret;
}

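/* Copy the resulting packet bytes and the size/retval/duration results back
 * to the user buffers described by @kattr and @uattr.  Returns 0 on success
 * or -EFAULT if any copy to user space fails.
 */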
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}

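/* Allocate a zeroed kernel buffer with the requested head- and tailroom and
 * copy the user-supplied test packet in after the headroom.  The input must
 * be at least ETH_HLEN bytes and fit in one page together with the reserved
 * room.  Returns the buffer or an ERR_PTR().
 */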
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

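/* BPF_PROG_TEST_RUN handler for skb-based program types: wrap the test data
 * in a linear skb that looks like it was received on the loopback device of
 * the current netns, run the program, and return the (possibly modified)
 * packet together with the program's return value and average run time.
 */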
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}

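/* BPF_PROG_TEST_RUN handler for XDP programs: build an xdp_buff with
 * XDP_PACKET_HEADROOM plus NET_IP_ALIGN of headroom around the test data,
 * point it at the loopback device's first RX queue, run the program, and
 * return the packet as adjusted by the program (e.g. after head changes).
 */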
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}
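
/* Illustrative only, not part of this file: a minimal sketch of how user
 * space reaches these handlers through the bpf(2) syscall's
 * BPF_PROG_TEST_RUN command.  prog_fd, pkt, pkt_len and out are assumed to
 * be provided by the caller, out must be large enough for the returned
 * packet, and error handling is elided.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.data_out     = (__u64)(unsigned long)out;
 *	attr.test.repeat       = 1000;
 *
 *	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) == 0)
 *		printf("retval %u, avg %u ns, %u bytes out\n",
 *		       attr.test.retval, attr.test.duration,
 *		       attr.test.data_size_out);
 */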