/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
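
/*
 * Usage sketch (illustrative only, not part of this file): a kprobe BPF
 * program typically calls this helper to copy data behind an arbitrary
 * kernel pointer into a buffer on its own stack before looking at it, e.g.
 *
 *	struct task_struct *task = (void *) PT_REGS_PARM1(ctx);
 *	int pid = 0;
 *
 *	bpf_probe_read(&pid, sizeof(pid), &task->pid);
 *
 * PT_REGS_PARM1() stands for whatever arch-specific accessor the program
 * uses to fetch the first argument; the dst/size/src argument order matches
 * bpf_probe_read_proto above (ARG_PTR_TO_RAW_STACK, ARG_CONST_STACK_SIZE,
 * ARG_ANYTHING).
 */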

static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *unsafe_ptr = (void *) (long) r1;
	void *src = (void *) (long) r2;
	int size = (int) r3;

	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}
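
/*
 * Usage sketch (illustrative only): from a BPF program the helper is
 * normally called with a format string living on the program stack, e.g.
 *
 *	char fmt[] = "pid %d comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * At most three arguments and a single %s are accepted, as enforced by
 * bpf_trace_printk() above; the output shows up in the tracing trace_pipe.
 */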

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * the return value alone does not tell whether the read
	 * succeeded; that has to be judged elsewhere, e.g. by the
	 * eBPF program itself.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *)(long) r1;
	struct bpf_map *map = (struct bpf_map *)(long) r2;
	void *data = (void *)(long) r4;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
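
/*
 * Usage sketch (illustrative only): a program that wants to push a sample
 * to user space typically pairs this helper with a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map (called "events" here purely for the
 * example):
 *
 *	struct data_t data = { ... };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 *
 * BPF_F_CURRENT_CPU selects the per-cpu entry of the array, matching the
 * index handling in __bpf_perf_event_output() above.
 */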

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto	 = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};
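
/*
 * Usage sketch (illustrative only): for BPF_PROG_TYPE_KPROBE the context
 * passed to the program is the 'struct pt_regs' of the probed function, so
 * register fields can be read directly (the offsets are validated by
 * kprobe_prog_is_valid_access() above). On x86-64 the first argument of the
 * probed function would be fetched as:
 *
 *	int bpf_prog(struct pt_regs *ctx)
 *	{
 *		long arg1 = ctx->di;
 *		...
 *	}
 */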

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto	 = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);