--- test_run.c (a4654e9bde4ecedb4921e6c8fe2088114bdff1b3)
+++ test_run.c (e9ff9d52540a53ce8c9eff5bf8b66467fe81eb2b)
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2017 Facebook
  */
 #include <linux/bpf.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
 #include <linux/filter.h>
 #include <linux/sched/signal.h>
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
 #include <net/tcp.h>
+#include <linux/error-injection.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
 
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 			u32 *retval, u32 *time, bool xdp)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };

--- 11 unchanged lines hidden ---

 			return -ENOMEM;
 		}
 	}
 
 	if (!repeat)
 		repeat = 1;
 
 	rcu_read_lock();
-	preempt_disable();
+	migrate_disable();
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
 		bpf_cgroup_storage_set(storage);
 
 		if (xdp)
 			*retval = bpf_prog_run_xdp(prog, ctx);
 		else
 			*retval = BPF_PROG_RUN(prog, ctx);
 
 		if (signal_pending(current)) {
 			ret = -EINTR;
 			break;
 		}
 
 		if (need_resched()) {
 			time_spent += ktime_get_ns() - time_start;
-			preempt_enable();
+			migrate_enable();
 			rcu_read_unlock();
 
 			cond_resched();
 
 			rcu_read_lock();
-			preempt_disable();
+			migrate_disable();
 			time_start = ktime_get_ns();
 		}
 	}
 	time_spent += ktime_get_ns() - time_start;
-	preempt_enable();
+	migrate_enable();
 	rcu_read_unlock();
 
 	do_div(time_spent, repeat);
 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
 	for_each_cgroup_storage_type(stype)
 		bpf_cgroup_storage_free(storage[stype]);
 

--- 31 unchanged lines hidden ---

 	trace_bpf_test_finish(&err);
 	return err;
 }
 
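The repeat loop above is what the BPF_PROG_TEST_RUN command drives; the preempt_disable()/preempt_enable() pairs become migrate_disable()/migrate_enable() so each iteration stays pinned to one CPU while the section remains preemptible, as PREEMPT_RT requires. For context, a minimal user-space sketch of exercising this loop, assuming a libbpf version that provides bpf_prog_test_run_opts(); prog_fd, pkt and test_run_bench() are illustrative names, not part of this file:

#include <bpf/bpf.h>

/* Benchmark an already-loaded program against a packet buffer. */
static int test_run_bench(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,		/* must be at least ETH_HLEN bytes */
		.data_size_in = pkt_len,
		.repeat = 1000000,	/* the kernel loop handles EINTR/need_resched */
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	/* opts.retval is the program's return code; opts.duration is the
	 * average nanoseconds per run (the *time value computed above). */
	return err ?: (int)opts.retval;
}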
 /* Integer types of various sizes and pointer combinations cover variety of
  * architecture dependent calling conventions. 7+ can be supported in the
  * future.
  */
+__diag_push();
+__diag_ignore(GCC, 8, "-Wmissing-prototypes",
+	      "Global functions as their definitions will be in vmlinux BTF");
 int noinline bpf_fentry_test1(int a)
 {
 	return a + 1;
 }
 
 int noinline bpf_fentry_test2(int a, u64 b)
 {
 	return a + b;

--- 14 unchanged lines hidden ---

 	return a + (long)b + c + d + e;
 }
 
 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
 {
 	return a + (long)b + c + d + (long)e + f;
 }
 
+int noinline bpf_modify_return_test(int a, int *b)
+{
+	*b += 1;
+	return a + *b;
+}
+__diag_pop();
+
+ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
+
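ALLOW_ERROR_INJECTION() is what permits a BPF_MODIFY_RETURN program to attach here and override the function's return value. A sketch of such a program, modeled on the kernel selftests and assuming libbpf's BPF_PROG macro from bpf_tracing.h (fmod_ret_test is an illustrative name):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
	/* A non-zero return skips the traced function's body and becomes
	 * the caller-visible return value; returning 0 leaves it alone. */
	return ret + 5;
}

char _license[] SEC("license") = "GPL";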
 static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
 			   u32 headroom, u32 tailroom)
 {
 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
 	void *data;
 
 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
 		return ERR_PTR(-EINVAL);
 
 	data = kzalloc(size + headroom + tailroom, GFP_USER);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
 	if (copy_from_user(data + headroom, data_in, size)) {
 		kfree(data);
 		return ERR_PTR(-EFAULT);
 	}
-	if (bpf_fentry_test1(1) != 2 ||
-	    bpf_fentry_test2(2, 3) != 5 ||
-	    bpf_fentry_test3(4, 5, 6) != 15 ||
-	    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
-	    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
-	    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) {
-		kfree(data);
-		return ERR_PTR(-EFAULT);
-	}
+
 	return data;
 }
 
+int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+			      const union bpf_attr *kattr,
+			      union bpf_attr __user *uattr)
+{
+	u16 side_effect = 0, ret = 0;
+	int b = 2, err = -EFAULT;
+	u32 retval = 0;
+
+	switch (prog->expected_attach_type) {
+	case BPF_TRACE_FENTRY:
+	case BPF_TRACE_FEXIT:
+		if (bpf_fentry_test1(1) != 2 ||
+		    bpf_fentry_test2(2, 3) != 5 ||
+		    bpf_fentry_test3(4, 5, 6) != 15 ||
+		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
+		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
+		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+			goto out;
+		break;
+	case BPF_MODIFY_RETURN:
+		ret = bpf_modify_return_test(1, &b);
+		if (b != 2)
+			side_effect = 1;
+		break;
+	default:
+		goto out;
+	}
+
+	retval = ((u32)side_effect << 16) | ret;
+	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
+		goto out;
+
+	err = 0;
+out:
+	trace_bpf_test_finish(&err);
+	return err;
+}
+
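The retval written back above packs two results into one u32: the low 16 bits carry the value the trampoline produced, the high 16 bits a flag recording whether the traced function's body actually ran. A hypothetical user-space decode, assuming opts came back from bpf_prog_test_run_opts():

__u16 side_effect = opts.retval >> 16;	/* 1: bpf_modify_return_test()'s body executed */
__u16 ret = opts.retval & 0xffff;	/* return value observed by the caller */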
 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
 {
 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
 	u32 size = kattr->test.ctx_size_in;
 	void *data;
 	int err;
 

--- 89 unchanged lines hidden ---

 			   offsetof(struct __sk_buff, tstamp)))
 		return -EINVAL;
 
 	/* tstamp is allowed */
 	/* wire_len is allowed */
 	/* gso_segs is allowed */
 
 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
+			   offsetof(struct __sk_buff, gso_size)))
+		return -EINVAL;
+
+	/* gso_size is allowed */
+
+	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
 			   sizeof(struct __sk_buff)))
 		return -EINVAL;
 
 	skb->mark = __skb->mark;
 	skb->priority = __skb->priority;
 	skb->tstamp = __skb->tstamp;
 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
 

--- 4 unchanged lines hidden ---

 		    __skb->wire_len > GSO_MAX_SIZE)
 			return -EINVAL;
 		cb->pkt_len = __skb->wire_len;
 	}
 
 	if (__skb->gso_segs > GSO_MAX_SEGS)
 		return -EINVAL;
 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
+	skb_shinfo(skb)->gso_size = __skb->gso_size;
 
 	return 0;
 }
 
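With gso_size now forwarded from the user-supplied context, a test run can exercise GSO-dependent code in skb programs. A sketch, again assuming libbpf's bpf_prog_test_run_opts(); pkt and prog_fd are placeholders:

struct __sk_buff ctx = {
	.gso_segs = 2,
	.gso_size = 1400,	/* copied into skb_shinfo(skb)->gso_size above */
};
LIBBPF_OPTS(bpf_test_run_opts, opts,
	.data_in = pkt,
	.data_size_in = sizeof(pkt),
	.ctx_in = &ctx,
	.ctx_size_in = sizeof(ctx),
);
int err = bpf_prog_test_run_opts(prog_fd, &opts);

Designated initialization leaves every other __sk_buff field zero, which the range_is_zero() checks above demand.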
 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
 {
 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
 
--- 259 unchanged lines hidden ---