/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
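
/*
 * Illustrative sketch, not part of this file: a minimal kprobe-attached
 * program whose return value trace_call_bpf() interprets as documented
 * above (0 drops the event, 1 stores it).  SEC(), the helper wrappers and
 * the "watched_pid" constant are assumptions in the samples/bpf style.
 *
 *	SEC("kprobe/sys_write")
 *	int filter_write(struct pt_regs *ctx)
 *	{
 *		u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (pid != watched_pid)
 *			return 0;	// filtered out, no ring-buffer entry
 *		return 1;		// store the kprobe event
 *	}
 */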

static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
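
/*
 * Illustrative sketch, not part of this file: a tracing program combining
 * bpf_probe_read() with the limited bpf_trace_printk() above, staying
 * within the documented limits (at most three arguments, only the listed
 * conversion specifiers).  SEC() and PT_REGS_PARM1() are samples/bpf-style
 * assumptions; the kprobe target is arbitrary.
 *
 *	SEC("kprobe/__netif_receive_skb_core")
 *	int print_skb(struct pt_regs *ctx)
 *	{
 *		char fmt[] = "skb %p len %d\n";
 *		struct sk_buff *skb = (void *) PT_REGS_PARM1(ctx);
 *		int len = 0;
 *
 *		// unsafe skb fields must be fetched via bpf_probe_read()
 *		bpf_probe_read(&len, sizeof(len), &skb->len);
 *		bpf_trace_printk(fmt, sizeof(fmt), skb, len);
 *		return 0;
 *	}
 */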

static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_event *event;
	struct file *file;

	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	file = READ_ONCE(array->ptrs[index]);
	if (unlikely(!file))
		return -ENOENT;

	event = file->private_data;

	/* make sure event is local and doesn't have pmu::count */
	if (event->oncpu != smp_processor_id() ||
	    event->pmu->count)
		return -EINVAL;

	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF programs.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 index = flags & BPF_F_INDEX_MASK;
	void *data = (void *) (long) r4;
	struct perf_sample_data sample_data;
	struct perf_event *event;
	struct file *file;
	struct perf_raw_record raw = {
		.size = size,
		.data = data,
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = raw_smp_processor_id();
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	file = READ_ONCE(array->ptrs[index]);
	if (unlikely(!file))
		return -ENOENT;

	event = file->private_data;

	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != smp_processor_id()))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = &raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);

	perf_fetch_caller_regs(regs);

	return bpf_perf_event_output((long)regs, r2, flags, r4, size);
}

static const struct bpf_func_proto bpf_event_output_proto = {
	.func		= bpf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_event_output_proto(void)
{
	return &bpf_event_output_proto;
}
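
/*
 * Illustrative sketch, not part of this file: emitting a sample through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map, which is what bpf_perf_event_output()
 * above expects to find behind array->ptrs[index].  User space must have
 * stored a PERF_COUNT_SW_BPF_OUTPUT event fd for each CPU in the map; the
 * map definition macros and SEC() follow samples/bpf conventions and are
 * assumptions here.
 *
 *	struct bpf_map_def SEC("maps") events = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(u32),
 *		.max_entries = 64,	// >= number of possible CPUs
 *	};
 *
 *	SEC("kprobe/sys_write")
 *	int notify_write(struct pt_regs *ctx)
 *	{
 *		u64 cookie = 0xdead;
 *
 *		// BPF_F_CURRENT_CPU selects the event of the current CPU
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &cookie, sizeof(cookie));
 *		return 0;
 *	}
 */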

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;

	/* only read is allowed */
	if (type != BPF_READ)
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};
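
/*
 * Illustrative sketch, not part of this file: a BPF_PROG_TYPE_TRACEPOINT
 * program.  Its context is the raw perf tracepoint record; offsets below
 * sizeof(void *) are rejected by tp_prog_is_valid_access() because that
 * slot carries the hidden 'struct pt_regs' pointer consumed by the _tp
 * helpers above.  The args struct must mirror the tracepoint's
 * .../events/<category>/<name>/format file; the layout shown here and
 * SEC() are hypothetical, samples/bpf-style assumptions.
 *
 *	struct sched_wakeup_args {
 *		unsigned long long pad;		// first 8 bytes not readable
 *		char comm[16];
 *		int pid;
 *		int prio;
 *		int success;
 *		int target_cpu;
 *	};
 *
 *	SEC("tracepoint/sched/sched_wakeup")
 *	int on_wakeup(struct sched_wakeup_args *args)
 *	{
 *		char fmt[] = "wakeup pid %d cpu %d\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt), args->pid, args->target_cpu);
 *		return 0;
 *	}
 */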

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);
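
/*
 * Illustrative sketch, not part of this file: how user space attaches one
 * of the program types registered above.  Once bpf(BPF_PROG_LOAD) has
 * returned prog_fd, the program is bound to a kprobe or tracepoint perf
 * event with PERF_EVENT_IOC_SET_BPF; "id" is read from the event's
 * .../events/.../<name>/id file.  Error handling is omitted and the attr
 * fields follow the samples/bpf loader, so treat them as assumptions.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_TRACEPOINT,
 *		.config = id,
 *		.sample_type = PERF_SAMPLE_RAW,
 *		.sample_period = 1,
 *		.wakeup_events = 1,
 *	};
 *	int efd = syscall(__NR_perf_event_open, &attr,
 *			  -1, 0, -1, 0);	// pid, cpu, group_fd, flags
 *
 *	ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
 *	ioctl(efd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 */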