/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into the ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since a bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send a kprobe event into the ring buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
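
/*
 * Example: a minimal BPF_PROG_TYPE_KPROBE program illustrating the return
 * contract documented above. This is a sketch modeled on samples/bpf; the
 * SEC() macro, the attach point and the pid value are assumptions, not
 * part of this file:
 *
 *	SEC("kprobe/sys_write")
 *	int filter_pid(struct pt_regs *ctx)
 *	{
 *		u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *		return tgid == 1234;	(1 = store event, 0 = filter out)
 *	}
 */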

static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
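
/*
 * Example use from a bpf program (a sketch, assuming the bpf_probe_read()
 * wrapper and kernel headers that samples/bpf programs include): safely
 * dereference a kernel pointer.
 *
 *	struct task_struct *task = (void *) PT_REGS_PARM1(ctx);
 *	char comm[16] = {};
 *
 *	bpf_probe_read(&comm, sizeof(comm), task->comm);
 *
 * On fault the helper zeroes the destination and returns a negative errno,
 * so 'comm' is safe to use either way.
 */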

/*
 * limited trace_printk()
 * only the %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers
 * are allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to the bpf program stack, that
	 * fmt_size bytes of it were initialized, and that fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk(),
	 * so allocate the per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
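
/*
 * Example (sketch): with the restrictions above (at most three arguments,
 * one '%s', the listed specifiers only), a typical call from a program
 * looks like this, where 'pid' and 'comm' are assumed locals:
 *
 *	char fmt[] = "pid %d comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * The format string must live on the bpf stack (ARG_PTR_TO_STACK) and be
 * NUL-terminated within fmt_size; the output shows up in
 * /sys/kernel/debug/tracing/trace_pipe.
 */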

static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_event *event;
	struct file *file;

	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	file = (struct file *)array->ptrs[index];
	if (unlikely(!file))
		return -ENOENT;

	event = file->private_data;

	/* make sure event is local and doesn't have pmu::count */
	if (event->oncpu != smp_processor_id() ||
	    event->pmu->count)
		return -EINVAL;

	/*
	 * the return value alone doesn't tell whether the read
	 * succeeded; that has to be judged elsewhere, e.g. by the
	 * eBPF program itself
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
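
/*
 * Example (sketch): reading a hardware counter through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY. User space opens one counting perf event
 * per cpu (e.g. PERF_COUNT_HW_CPU_CYCLES) and stores the fds in the map;
 * 'cycles_map' is a hypothetical map name:
 *
 *	u64 cycles = bpf_perf_event_read(&cycles_map,
 *					 bpf_get_smp_processor_id());
 *
 * Per the comment in the function body, error codes and counter values
 * share the same return channel.
 */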

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 index = flags & BPF_F_INDEX_MASK;
	void *data = (void *) (long) r4;
	struct perf_sample_data sample_data;
	struct perf_event *event;
	struct file *file;
	struct perf_raw_record raw = {
		.size = size,
		.data = data,
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = raw_smp_processor_id();
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	file = (struct file *)array->ptrs[index];
	if (unlikely(!file))
		return -ENOENT;

	event = file->private_data;

	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != smp_processor_id()))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = &raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
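
/*
 * Example (sketch): streaming a custom struct to user space. 'events' is a
 * hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY whose entries user space has
 * populated with PERF_COUNT_SW_BPF_OUTPUT events:
 *
 *	struct data_t { u32 pid; u64 ts; } data = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 *
 * BPF_F_CURRENT_CPU selects the ring buffer of the executing cpu, matching
 * the index handling above; 'data' lives on the bpf stack as required by
 * ARG_PTR_TO_STACK.
 */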

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

/*
 * callers without a struct pt_regs context fabricate one from the current
 * call site, so the regular bpf_perf_event_output() path can be reused
 */
static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);

	perf_fetch_caller_regs(regs);

	return bpf_perf_event_output((long)regs, r2, flags, r4, size);
}

static const struct bpf_func_proto bpf_event_output_proto = {
	.func		= bpf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_event_output_proto(void)
{
	return &bpf_event_output_proto;
}

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;

	/* only read is allowed */
	if (type != BPF_READ)
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	return true;
}
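
/*
 * E.g. on x86_64 an aligned 8-byte read of ctx->ip passes all three checks,
 * while any write, an out-of-range offset, or a 4-byte read at offset 2 is
 * rejected at verification time.
 */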

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};
static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to the perf tracepoint buffer where the first 8 bytes
	 * are hidden from the bpf program and contain a pointer to
	 * 'struct pt_regs'. Fetch it from there and call the same
	 * bpf_perf_event_output() helper.
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};
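
/*
 * Example (sketch): a BPF_PROG_TYPE_TRACEPOINT program sees the tracepoint
 * record as its context, so it reads event fields directly instead of
 * decoding pt_regs. The SEC() name, the 'events' map and the field layout
 * below are assumptions modeled on samples/bpf and the sched_switch
 * tracepoint format; the leading pad covers the area that
 * tp_prog_is_valid_access() refuses to expose:
 *
 *	struct sched_switch_args {
 *		unsigned long long pad;
 *		char prev_comm[16];
 *		int prev_pid;
 *		int prev_prio;
 *		long long prev_state;
 *		char next_comm[16];
 *		int next_pid;
 *		int next_prio;
 *	};
 *
 *	SEC("tracepoint/sched/sched_switch")
 *	int trace_switch(struct sched_switch_args *ctx)
 *	{
 *		int pid = ctx->next_pid;
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &pid, sizeof(pid));
 *		return 0;
 *	}
 */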

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);