1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016 Facebook 4 */ 5 #include <linux/kernel.h> 6 #include <linux/types.h> 7 #include <linux/slab.h> 8 #include <linux/bpf.h> 9 #include <linux/bpf_perf_event.h> 10 #include <linux/btf.h> 11 #include <linux/filter.h> 12 #include <linux/uaccess.h> 13 #include <linux/ctype.h> 14 #include <linux/kprobes.h> 15 #include <linux/spinlock.h> 16 #include <linux/syscalls.h> 17 #include <linux/error-injection.h> 18 #include <linux/btf_ids.h> 19 20 #include <uapi/linux/bpf.h> 21 #include <uapi/linux/btf.h> 22 23 #include <asm/tlb.h> 24 25 #include "trace_probe.h" 26 #include "trace.h" 27 28 #define CREATE_TRACE_POINTS 29 #include "bpf_trace.h" 30 31 #define bpf_event_rcu_dereference(p) \ 32 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) 33 34 #ifdef CONFIG_MODULES 35 struct bpf_trace_module { 36 struct module *module; 37 struct list_head list; 38 }; 39 40 static LIST_HEAD(bpf_trace_modules); 41 static DEFINE_MUTEX(bpf_module_mutex); 42 43 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 44 { 45 struct bpf_raw_event_map *btp, *ret = NULL; 46 struct bpf_trace_module *btm; 47 unsigned int i; 48 49 mutex_lock(&bpf_module_mutex); 50 list_for_each_entry(btm, &bpf_trace_modules, list) { 51 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { 52 btp = &btm->module->bpf_raw_events[i]; 53 if (!strcmp(btp->tp->name, name)) { 54 if (try_module_get(btm->module)) 55 ret = btp; 56 goto out; 57 } 58 } 59 } 60 out: 61 mutex_unlock(&bpf_module_mutex); 62 return ret; 63 } 64 #else 65 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 66 { 67 return NULL; 68 } 69 #endif /* CONFIG_MODULES */ 70 71 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 72 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 73 74 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 75 u64 flags, const struct btf **btf, 76 s32 *btf_id); 77 78 /** 79 * trace_call_bpf - invoke BPF program 80 * @call: tracepoint event 81 * @ctx: opaque context pointer 82 * 83 * kprobe handlers execute BPF programs via this helper. 84 * Can be used from static tracepoints in the future. 85 * 86 * Return: BPF programs always return an integer which is interpreted by 87 * the kprobe handler as: 88 * 0 - return from kprobe (event is filtered out) 89 * 1 - store kprobe event into ring buffer 90 * Other values are reserved and currently alias to 1 91 */ 92 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) 93 { 94 unsigned int ret; 95 96 if (in_nmi()) /* not supported yet */ 97 return 1; 98 99 cant_sleep(); 100 101 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 102 /* 103 * since some bpf program is already running on this cpu, 104 * don't call into another bpf program (same or different) 105 * and don't send kprobe event into ring-buffer, 106 * so return zero here 107 */ 108 ret = 0; 109 goto out; 110 } 111 112 /* 113 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock 114 * to all call sites, we did a bpf_prog_array_valid() there to check 115 * whether call->prog_array is empty or not, which is 116 * a heuristic to speed up execution. 117 * 118 * If the prog_array fetched by bpf_prog_array_valid() was 119 * non-NULL, we go into trace_call_bpf() and do the actual 120 * proper rcu_dereference() under RCU lock. 121 * If it turns out that prog_array is NULL, we bail out.
122 * Conversely, if the pointer fetched by bpf_prog_array_valid() 123 * was NULL, the prog_array is skipped, with the risk of missing 124 * out on events if it was updated in between that check and the 125 * rcu_dereference(), which is an accepted risk. 126 */ 127 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN); 128 129 out: 130 __this_cpu_dec(bpf_prog_active); 131 132 return ret; 133 } 134 135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) 137 { 138 regs_set_return_value(regs, rc); 139 override_function_with_return(regs); 140 return 0; 141 } 142 143 static const struct bpf_func_proto bpf_override_return_proto = { 144 .func = bpf_override_return, 145 .gpl_only = true, 146 .ret_type = RET_INTEGER, 147 .arg1_type = ARG_PTR_TO_CTX, 148 .arg2_type = ARG_ANYTHING, 149 }; 150 #endif 151 152 static __always_inline int 153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) 154 { 155 int ret; 156 157 ret = copy_from_user_nofault(dst, unsafe_ptr, size); 158 if (unlikely(ret < 0)) 159 memset(dst, 0, size); 160 return ret; 161 } 162 163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, 164 const void __user *, unsafe_ptr) 165 { 166 return bpf_probe_read_user_common(dst, size, unsafe_ptr); 167 } 168 169 const struct bpf_func_proto bpf_probe_read_user_proto = { 170 .func = bpf_probe_read_user, 171 .gpl_only = true, 172 .ret_type = RET_INTEGER, 173 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 174 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 175 .arg3_type = ARG_ANYTHING, 176 }; 177 178 static __always_inline int 179 bpf_probe_read_user_str_common(void *dst, u32 size, 180 const void __user *unsafe_ptr) 181 { 182 int ret; 183 184 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); 185 if (unlikely(ret < 0)) 186 memset(dst, 0, size); 187 return ret; 188 } 189 190 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, 191 const void __user *, unsafe_ptr) 192 { 193 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); 194 } 195 196 const struct bpf_func_proto bpf_probe_read_user_str_proto = { 197 .func = bpf_probe_read_user_str, 198 .gpl_only = true, 199 .ret_type = RET_INTEGER, 200 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 201 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 202 .arg3_type = ARG_ANYTHING, 203 }; 204 205 static __always_inline int 206 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) 207 { 208 int ret = security_locked_down(LOCKDOWN_BPF_READ); 209 210 if (unlikely(ret < 0)) 211 goto fail; 212 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); 213 if (unlikely(ret < 0)) 214 goto fail; 215 return ret; 216 fail: 217 memset(dst, 0, size); 218 return ret; 219 } 220 221 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, 222 const void *, unsafe_ptr) 223 { 224 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 225 } 226 227 const struct bpf_func_proto bpf_probe_read_kernel_proto = { 228 .func = bpf_probe_read_kernel, 229 .gpl_only = true, 230 .ret_type = RET_INTEGER, 231 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 232 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 233 .arg3_type = ARG_ANYTHING, 234 }; 235 236 static __always_inline int 237 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) 238 { 239 int ret = security_locked_down(LOCKDOWN_BPF_READ); 240 241 if (unlikely(ret < 0)) 242 goto fail; 243 244 /* 245 * The strncpy_from_kernel_nofault() call will likely not fill the 246 * entire buffer, but that's okay in this circumstance as we're probing 247
* arbitrary memory anyway similar to bpf_probe_read_*() and might 248 * as well probe the stack. Thus, memory is explicitly cleared 249 * only in error case, so that improper users ignoring return 250 * code altogether don't copy garbage; otherwise length of string 251 * is returned that can be used for bpf_perf_event_output() et al. 252 */ 253 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); 254 if (unlikely(ret < 0)) 255 goto fail; 256 257 return ret; 258 fail: 259 memset(dst, 0, size); 260 return ret; 261 } 262 263 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, 264 const void *, unsafe_ptr) 265 { 266 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 267 } 268 269 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { 270 .func = bpf_probe_read_kernel_str, 271 .gpl_only = true, 272 .ret_type = RET_INTEGER, 273 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 274 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 275 .arg3_type = ARG_ANYTHING, 276 }; 277 278 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 279 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, 280 const void *, unsafe_ptr) 281 { 282 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 283 return bpf_probe_read_user_common(dst, size, 284 (__force void __user *)unsafe_ptr); 285 } 286 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 287 } 288 289 static const struct bpf_func_proto bpf_probe_read_compat_proto = { 290 .func = bpf_probe_read_compat, 291 .gpl_only = true, 292 .ret_type = RET_INTEGER, 293 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 294 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 295 .arg3_type = ARG_ANYTHING, 296 }; 297 298 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, 299 const void *, unsafe_ptr) 300 { 301 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 302 return bpf_probe_read_user_str_common(dst, size, 303 (__force void __user *)unsafe_ptr); 304 } 305 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 306 } 307 308 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { 309 .func = bpf_probe_read_compat_str, 310 .gpl_only = true, 311 .ret_type = RET_INTEGER, 312 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 313 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 314 .arg3_type = ARG_ANYTHING, 315 }; 316 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ 317 318 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, 319 u32, size) 320 { 321 /* 322 * Ensure we're in user context which is safe for the helper to 323 * run. This helper has no business in a kthread. 324 * 325 * access_ok() should prevent writing to non-user memory, but in 326 * some situations (nommu, temporary switch, etc) access_ok() does 327 * not provide enough validation, hence the check on KERNEL_DS. 328 * 329 * nmi_uaccess_okay() ensures the probe is not run in an interim 330 * state, when the task or mm are switched. This is specifically 331 * required to prevent the use of temporary mm. 
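 *
 * The checks below implement these requirements in order: no irq /
 * kthread / exiting-task context, no KERNEL_DS (uaccess_kernel()) and
 * no interim mm state (nmi_uaccess_okay()).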
332 */ 333 334 if (unlikely(in_interrupt() || 335 current->flags & (PF_KTHREAD | PF_EXITING))) 336 return -EPERM; 337 if (unlikely(uaccess_kernel())) 338 return -EPERM; 339 if (unlikely(!nmi_uaccess_okay())) 340 return -EPERM; 341 342 return copy_to_user_nofault(unsafe_ptr, src, size); 343 } 344 345 static const struct bpf_func_proto bpf_probe_write_user_proto = { 346 .func = bpf_probe_write_user, 347 .gpl_only = true, 348 .ret_type = RET_INTEGER, 349 .arg1_type = ARG_ANYTHING, 350 .arg2_type = ARG_PTR_TO_MEM, 351 .arg3_type = ARG_CONST_SIZE, 352 }; 353 354 static const struct bpf_func_proto *bpf_get_probe_write_proto(void) 355 { 356 if (!capable(CAP_SYS_ADMIN)) 357 return NULL; 358 359 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", 360 current->comm, task_pid_nr(current)); 361 362 return &bpf_probe_write_user_proto; 363 } 364 365 static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 366 size_t bufsz) 367 { 368 void __user *user_ptr = (__force void __user *)unsafe_ptr; 369 370 buf[0] = 0; 371 372 switch (fmt_ptype) { 373 case 's': 374 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 375 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 376 strncpy_from_user_nofault(buf, user_ptr, bufsz); 377 break; 378 } 379 fallthrough; 380 #endif 381 case 'k': 382 strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 383 break; 384 case 'u': 385 strncpy_from_user_nofault(buf, user_ptr, bufsz); 386 break; 387 } 388 } 389 390 static DEFINE_RAW_SPINLOCK(trace_printk_lock); 391 392 #define BPF_TRACE_PRINTK_SIZE 1024 393 394 static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...) 395 { 396 static char buf[BPF_TRACE_PRINTK_SIZE]; 397 unsigned long flags; 398 va_list ap; 399 int ret; 400 401 raw_spin_lock_irqsave(&trace_printk_lock, flags); 402 va_start(ap, fmt); 403 ret = vsnprintf(buf, sizeof(buf), fmt, ap); 404 va_end(ap); 405 /* vsnprintf() will not append null for zero-length strings */ 406 if (ret == 0) 407 buf[0] = '\0'; 408 trace_bpf_trace_printk(buf); 409 raw_spin_unlock_irqrestore(&trace_printk_lock, flags); 410 411 return ret; 412 } 413 414 /* 415 * Only limited trace_printk() conversion specifiers allowed: 416 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s 417 */ 418 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, 419 u64, arg2, u64, arg3) 420 { 421 int i, mod[3] = {}, fmt_cnt = 0; 422 char buf[64], fmt_ptype; 423 void *unsafe_ptr = NULL; 424 bool str_seen = false; 425 426 /* 427 * bpf_check()->check_func_arg()->check_stack_boundary() 428 * guarantees that fmt points to bpf program stack, 429 * fmt_size bytes of it were initialized and fmt_size > 0 430 */ 431 if (fmt[--fmt_size] != 0) 432 return -EINVAL; 433 434 /* check format string for allowed specifiers */ 435 for (i = 0; i < fmt_size; i++) { 436 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) 437 return -EINVAL; 438 439 if (fmt[i] != '%') 440 continue; 441 442 if (fmt_cnt >= 3) 443 return -EINVAL; 444 445 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ 446 i++; 447 if (fmt[i] == 'l') { 448 mod[fmt_cnt]++; 449 i++; 450 } else if (fmt[i] == 'p') { 451 mod[fmt_cnt]++; 452 if ((fmt[i + 1] == 'k' || 453 fmt[i + 1] == 'u') && 454 fmt[i + 2] == 's') { 455 fmt_ptype = fmt[i + 1]; 456 i += 2; 457 goto fmt_str; 458 } 459 460 if (fmt[i + 1] == 'B') { 461 i++; 462 goto fmt_next; 463 } 464 465 /* disallow any further format extensions */ 466 if (fmt[i + 1] != 0 && 467 
!isspace(fmt[i + 1]) && 468 !ispunct(fmt[i + 1])) 469 return -EINVAL; 470 471 goto fmt_next; 472 } else if (fmt[i] == 's') { 473 mod[fmt_cnt]++; 474 fmt_ptype = fmt[i]; 475 fmt_str: 476 if (str_seen) 477 /* allow only one '%s' per fmt string */ 478 return -EINVAL; 479 str_seen = true; 480 481 if (fmt[i + 1] != 0 && 482 !isspace(fmt[i + 1]) && 483 !ispunct(fmt[i + 1])) 484 return -EINVAL; 485 486 switch (fmt_cnt) { 487 case 0: 488 unsafe_ptr = (void *)(long)arg1; 489 arg1 = (long)buf; 490 break; 491 case 1: 492 unsafe_ptr = (void *)(long)arg2; 493 arg2 = (long)buf; 494 break; 495 case 2: 496 unsafe_ptr = (void *)(long)arg3; 497 arg3 = (long)buf; 498 break; 499 } 500 501 bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype, 502 sizeof(buf)); 503 goto fmt_next; 504 } 505 506 if (fmt[i] == 'l') { 507 mod[fmt_cnt]++; 508 i++; 509 } 510 511 if (fmt[i] != 'i' && fmt[i] != 'd' && 512 fmt[i] != 'u' && fmt[i] != 'x') 513 return -EINVAL; 514 fmt_next: 515 fmt_cnt++; 516 } 517 518 /* Horrid workaround for getting va_list handling working with different 519 * argument type combinations generically for 32 and 64 bit archs. 520 */ 521 #define __BPF_TP_EMIT() __BPF_ARG3_TP() 522 #define __BPF_TP(...) \ 523 bpf_do_trace_printk(fmt, ##__VA_ARGS__) 524 525 #define __BPF_ARG1_TP(...) \ 526 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ 527 ? __BPF_TP(arg1, ##__VA_ARGS__) \ 528 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ 529 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ 530 : __BPF_TP((u32)arg1, ##__VA_ARGS__))) 531 532 #define __BPF_ARG2_TP(...) \ 533 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ 534 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ 535 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ 536 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ 537 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) 538 539 #define __BPF_ARG3_TP(...) \ 540 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ 541 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ 542 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ 543 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ 544 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) 545 546 return __BPF_TP_EMIT(); 547 } 548 549 static const struct bpf_func_proto bpf_trace_printk_proto = { 550 .func = bpf_trace_printk, 551 .gpl_only = true, 552 .ret_type = RET_INTEGER, 553 .arg1_type = ARG_PTR_TO_MEM, 554 .arg2_type = ARG_CONST_SIZE, 555 }; 556 557 const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 558 { 559 /* 560 * This program might be calling bpf_trace_printk, 561 * so enable the associated bpf_trace/bpf_trace_printk event. 562 * Repeat this each time as it is possible a user has 563 * disabled bpf_trace_printk events. By loading a program 564 * calling bpf_trace_printk() however the user has expressed 565 * the intent to see such events. 
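 *
 * For illustration only (BPF program side, not part of this file), a
 * program would typically do:
 *
 *	char fmt[] = "pid %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid);
 *
 * and the formatted string is emitted through the
 * bpf_trace/bpf_trace_printk event enabled below (readable e.g. via
 * trace_pipe).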
566 */ 567 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 568 pr_warn_ratelimited("could not enable bpf_trace_printk events"); 569 570 return &bpf_trace_printk_proto; 571 } 572 573 #define MAX_SEQ_PRINTF_VARARGS 12 574 #define MAX_SEQ_PRINTF_MAX_MEMCPY 6 575 #define MAX_SEQ_PRINTF_STR_LEN 128 576 577 struct bpf_seq_printf_buf { 578 char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN]; 579 }; 580 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf); 581 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used); 582 583 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, 584 const void *, data, u32, data_len) 585 { 586 int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0; 587 int i, buf_used, copy_size, num_args; 588 u64 params[MAX_SEQ_PRINTF_VARARGS]; 589 struct bpf_seq_printf_buf *bufs; 590 const u64 *args = data; 591 592 buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used); 593 if (WARN_ON_ONCE(buf_used > 1)) { 594 err = -EBUSY; 595 goto out; 596 } 597 598 bufs = this_cpu_ptr(&bpf_seq_printf_buf); 599 600 /* 601 * bpf_check()->check_func_arg()->check_stack_boundary() 602 * guarantees that fmt points to bpf program stack, 603 * fmt_size bytes of it were initialized and fmt_size > 0 604 */ 605 if (fmt[--fmt_size] != 0) 606 goto out; 607 608 if (data_len & 7) 609 goto out; 610 611 for (i = 0; i < fmt_size; i++) { 612 if (fmt[i] == '%') { 613 if (fmt[i + 1] == '%') 614 i++; 615 else if (!data || !data_len) 616 goto out; 617 } 618 } 619 620 num_args = data_len / 8; 621 622 /* check format string for allowed specifiers */ 623 for (i = 0; i < fmt_size; i++) { 624 /* only printable ascii for now. */ 625 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 626 err = -EINVAL; 627 goto out; 628 } 629 630 if (fmt[i] != '%') 631 continue; 632 633 if (fmt[i + 1] == '%') { 634 i++; 635 continue; 636 } 637 638 if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) { 639 err = -E2BIG; 640 goto out; 641 } 642 643 if (fmt_cnt >= num_args) { 644 err = -EINVAL; 645 goto out; 646 } 647 648 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ 649 i++; 650 651 /* skip optional "[0 +-][num]" width formatting field */ 652 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 653 fmt[i] == ' ') 654 i++; 655 if (fmt[i] >= '1' && fmt[i] <= '9') { 656 i++; 657 while (fmt[i] >= '0' && fmt[i] <= '9') 658 i++; 659 } 660 661 if (fmt[i] == 's') { 662 void *unsafe_ptr; 663 664 /* try our best to copy */ 665 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { 666 err = -E2BIG; 667 goto out; 668 } 669 670 unsafe_ptr = (void *)(long)args[fmt_cnt]; 671 err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt], 672 unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN); 673 if (err < 0) 674 bufs->buf[memcpy_cnt][0] = '\0'; 675 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; 676 677 fmt_cnt++; 678 memcpy_cnt++; 679 continue; 680 } 681 682 if (fmt[i] == 'p') { 683 if (fmt[i + 1] == 0 || 684 fmt[i + 1] == 'K' || 685 fmt[i + 1] == 'x' || 686 fmt[i + 1] == 'B') { 687 /* just kernel pointers */ 688 params[fmt_cnt] = args[fmt_cnt]; 689 fmt_cnt++; 690 continue; 691 } 692 693 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 694 if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') { 695 err = -EINVAL; 696 goto out; 697 } 698 if (fmt[i + 2] != '4' && fmt[i + 2] != '6') { 699 err = -EINVAL; 700 goto out; 701 } 702 703 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { 704 err = -E2BIG; 705 goto out; 706 } 707 708 709 copy_size = (fmt[i + 2] == '4') ?
4 : 16; 710 711 err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt], 712 (void *) (long) args[fmt_cnt], 713 copy_size); 714 if (err < 0) 715 memset(bufs->buf[memcpy_cnt], 0, copy_size); 716 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; 717 718 i += 2; 719 fmt_cnt++; 720 memcpy_cnt++; 721 continue; 722 } 723 724 if (fmt[i] == 'l') { 725 i++; 726 if (fmt[i] == 'l') 727 i++; 728 } 729 730 if (fmt[i] != 'i' && fmt[i] != 'd' && 731 fmt[i] != 'u' && fmt[i] != 'x' && 732 fmt[i] != 'X') { 733 err = -EINVAL; 734 goto out; 735 } 736 737 params[fmt_cnt] = args[fmt_cnt]; 738 fmt_cnt++; 739 } 740 741 /* At most we can have MAX_SEQ_PRINTF_VARARGS parameters, just give 742 * all of them to seq_printf(). 743 */ 744 seq_printf(m, fmt, params[0], params[1], params[2], params[3], 745 params[4], params[5], params[6], params[7], params[8], 746 params[9], params[10], params[11]); 747 748 err = seq_has_overflowed(m) ? -EOVERFLOW : 0; 749 out: 750 this_cpu_dec(bpf_seq_printf_buf_used); 751 return err; 752 } 753 754 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) 755 756 static const struct bpf_func_proto bpf_seq_printf_proto = { 757 .func = bpf_seq_printf, 758 .gpl_only = true, 759 .ret_type = RET_INTEGER, 760 .arg1_type = ARG_PTR_TO_BTF_ID, 761 .arg1_btf_id = &btf_seq_file_ids[0], 762 .arg2_type = ARG_PTR_TO_MEM, 763 .arg3_type = ARG_CONST_SIZE, 764 .arg4_type = ARG_PTR_TO_MEM_OR_NULL, 765 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 766 }; 767 768 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) 769 { 770 return seq_write(m, data, len) ? -EOVERFLOW : 0; 771 } 772 773 static const struct bpf_func_proto bpf_seq_write_proto = { 774 .func = bpf_seq_write, 775 .gpl_only = true, 776 .ret_type = RET_INTEGER, 777 .arg1_type = ARG_PTR_TO_BTF_ID, 778 .arg1_btf_id = &btf_seq_file_ids[0], 779 .arg2_type = ARG_PTR_TO_MEM, 780 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 781 }; 782 783 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, 784 u32, btf_ptr_size, u64, flags) 785 { 786 const struct btf *btf; 787 s32 btf_id; 788 int ret; 789 790 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 791 if (ret) 792 return ret; 793 794 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); 795 } 796 797 static const struct bpf_func_proto bpf_seq_printf_btf_proto = { 798 .func = bpf_seq_printf_btf, 799 .gpl_only = true, 800 .ret_type = RET_INTEGER, 801 .arg1_type = ARG_PTR_TO_BTF_ID, 802 .arg1_btf_id = &btf_seq_file_ids[0], 803 .arg2_type = ARG_PTR_TO_MEM, 804 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 805 .arg4_type = ARG_ANYTHING, 806 }; 807 808 static __always_inline int 809 get_map_perf_counter(struct bpf_map *map, u64 flags, 810 u64 *value, u64 *enabled, u64 *running) 811 { 812 struct bpf_array *array = container_of(map, struct bpf_array, map); 813 unsigned int cpu = smp_processor_id(); 814 u64 index = flags & BPF_F_INDEX_MASK; 815 struct bpf_event_entry *ee; 816 817 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 818 return -EINVAL; 819 if (index == BPF_F_CURRENT_CPU) 820 index = cpu; 821 if (unlikely(index >= array->map.max_entries)) 822 return -E2BIG; 823 824 ee = READ_ONCE(array->ptrs[index]); 825 if (!ee) 826 return -ENOENT; 827 828 return perf_event_read_local(ee->event, value, enabled, running); 829 } 830 831 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) 832 { 833 u64 value = 0; 834 int err; 835 836 err = get_map_perf_counter(map, flags, &value, NULL, NULL); 837 /* 838 * this api is ugly since we miss [-22..-2] range of valid 839
counter values, but that's uapi 840 */ 841 if (err) 842 return err; 843 return value; 844 } 845 846 static const struct bpf_func_proto bpf_perf_event_read_proto = { 847 .func = bpf_perf_event_read, 848 .gpl_only = true, 849 .ret_type = RET_INTEGER, 850 .arg1_type = ARG_CONST_MAP_PTR, 851 .arg2_type = ARG_ANYTHING, 852 }; 853 854 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, 855 struct bpf_perf_event_value *, buf, u32, size) 856 { 857 int err = -EINVAL; 858 859 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 860 goto clear; 861 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, 862 &buf->running); 863 if (unlikely(err)) 864 goto clear; 865 return 0; 866 clear: 867 memset(buf, 0, size); 868 return err; 869 } 870 871 static const struct bpf_func_proto bpf_perf_event_read_value_proto = { 872 .func = bpf_perf_event_read_value, 873 .gpl_only = true, 874 .ret_type = RET_INTEGER, 875 .arg1_type = ARG_CONST_MAP_PTR, 876 .arg2_type = ARG_ANYTHING, 877 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 878 .arg4_type = ARG_CONST_SIZE, 879 }; 880 881 static __always_inline u64 882 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 883 u64 flags, struct perf_sample_data *sd) 884 { 885 struct bpf_array *array = container_of(map, struct bpf_array, map); 886 unsigned int cpu = smp_processor_id(); 887 u64 index = flags & BPF_F_INDEX_MASK; 888 struct bpf_event_entry *ee; 889 struct perf_event *event; 890 891 if (index == BPF_F_CURRENT_CPU) 892 index = cpu; 893 if (unlikely(index >= array->map.max_entries)) 894 return -E2BIG; 895 896 ee = READ_ONCE(array->ptrs[index]); 897 if (!ee) 898 return -ENOENT; 899 900 event = ee->event; 901 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || 902 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 903 return -EINVAL; 904 905 if (unlikely(event->oncpu != cpu)) 906 return -EOPNOTSUPP; 907 908 return perf_event_output(event, sd, regs); 909 } 910 911 /* 912 * Support executing tracepoints in normal, irq, and nmi context that each call 913 * bpf_perf_event_output 914 */ 915 struct bpf_trace_sample_data { 916 struct perf_sample_data sds[3]; 917 }; 918 919 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); 920 static DEFINE_PER_CPU(int, bpf_trace_nest_level); 921 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 922 u64, flags, void *, data, u64, size) 923 { 924 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); 925 int nest_level = this_cpu_inc_return(bpf_trace_nest_level); 926 struct perf_raw_record raw = { 927 .frag = { 928 .size = size, 929 .data = data, 930 }, 931 }; 932 struct perf_sample_data *sd; 933 int err; 934 935 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { 936 err = -EBUSY; 937 goto out; 938 } 939 940 sd = &sds->sds[nest_level - 1]; 941 942 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { 943 err = -EINVAL; 944 goto out; 945 } 946 947 perf_sample_data_init(sd, 0, 0); 948 sd->raw = &raw; 949 950 err = __bpf_perf_event_output(regs, map, flags, sd); 951 952 out: 953 this_cpu_dec(bpf_trace_nest_level); 954 return err; 955 } 956 957 static const struct bpf_func_proto bpf_perf_event_output_proto = { 958 .func = bpf_perf_event_output, 959 .gpl_only = true, 960 .ret_type = RET_INTEGER, 961 .arg1_type = ARG_PTR_TO_CTX, 962 .arg2_type = ARG_CONST_MAP_PTR, 963 .arg3_type = ARG_ANYTHING, 964 .arg4_type = ARG_PTR_TO_MEM, 965 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 966 }; 967 968 static DEFINE_PER_CPU(int, bpf_event_output_nest_level); 969 struct 
bpf_nested_pt_regs { 970 struct pt_regs regs[3]; 971 }; 972 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); 973 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); 974 975 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 976 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 977 { 978 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); 979 struct perf_raw_frag frag = { 980 .copy = ctx_copy, 981 .size = ctx_size, 982 .data = ctx, 983 }; 984 struct perf_raw_record raw = { 985 .frag = { 986 { 987 .next = ctx_size ? &frag : NULL, 988 }, 989 .size = meta_size, 990 .data = meta, 991 }, 992 }; 993 struct perf_sample_data *sd; 994 struct pt_regs *regs; 995 u64 ret; 996 997 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { 998 ret = -EBUSY; 999 goto out; 1000 } 1001 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); 1002 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); 1003 1004 perf_fetch_caller_regs(regs); 1005 perf_sample_data_init(sd, 0, 0); 1006 sd->raw = &raw; 1007 1008 ret = __bpf_perf_event_output(regs, map, flags, sd); 1009 out: 1010 this_cpu_dec(bpf_event_output_nest_level); 1011 return ret; 1012 } 1013 1014 BPF_CALL_0(bpf_get_current_task) 1015 { 1016 return (long) current; 1017 } 1018 1019 const struct bpf_func_proto bpf_get_current_task_proto = { 1020 .func = bpf_get_current_task, 1021 .gpl_only = true, 1022 .ret_type = RET_INTEGER, 1023 }; 1024 1025 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 1026 { 1027 struct bpf_array *array = container_of(map, struct bpf_array, map); 1028 struct cgroup *cgrp; 1029 1030 if (unlikely(idx >= array->map.max_entries)) 1031 return -E2BIG; 1032 1033 cgrp = READ_ONCE(array->ptrs[idx]); 1034 if (unlikely(!cgrp)) 1035 return -EAGAIN; 1036 1037 return task_under_cgroup_hierarchy(current, cgrp); 1038 } 1039 1040 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 1041 .func = bpf_current_task_under_cgroup, 1042 .gpl_only = false, 1043 .ret_type = RET_INTEGER, 1044 .arg1_type = ARG_CONST_MAP_PTR, 1045 .arg2_type = ARG_ANYTHING, 1046 }; 1047 1048 struct send_signal_irq_work { 1049 struct irq_work irq_work; 1050 struct task_struct *task; 1051 u32 sig; 1052 enum pid_type type; 1053 }; 1054 1055 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); 1056 1057 static void do_bpf_send_signal(struct irq_work *entry) 1058 { 1059 struct send_signal_irq_work *work; 1060 1061 work = container_of(entry, struct send_signal_irq_work, irq_work); 1062 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); 1063 } 1064 1065 static int bpf_send_signal_common(u32 sig, enum pid_type type) 1066 { 1067 struct send_signal_irq_work *work = NULL; 1068 1069 /* Similar to bpf_probe_write_user, task needs to be 1070 * in a sound condition and kernel memory access be 1071 * permitted in order to send signal to the current 1072 * task. 1073 */ 1074 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) 1075 return -EPERM; 1076 if (unlikely(uaccess_kernel())) 1077 return -EPERM; 1078 if (unlikely(!nmi_uaccess_okay())) 1079 return -EPERM; 1080 1081 if (irqs_disabled()) { 1082 /* Do an early check on signal validity. Otherwise, 1083 * the error is lost in deferred irq_work. 
1084 */ 1085 if (unlikely(!valid_signal(sig))) 1086 return -EINVAL; 1087 1088 work = this_cpu_ptr(&send_signal_work); 1089 if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) 1090 return -EBUSY; 1091 1092 /* Add the current task, which is the target of sending signal, 1093 * to the irq_work. The current task may change when queued 1094 * irq works get executed. 1095 */ 1096 work->task = current; 1097 work->sig = sig; 1098 work->type = type; 1099 irq_work_queue(&work->irq_work); 1100 return 0; 1101 } 1102 1103 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); 1104 } 1105 1106 BPF_CALL_1(bpf_send_signal, u32, sig) 1107 { 1108 return bpf_send_signal_common(sig, PIDTYPE_TGID); 1109 } 1110 1111 static const struct bpf_func_proto bpf_send_signal_proto = { 1112 .func = bpf_send_signal, 1113 .gpl_only = false, 1114 .ret_type = RET_INTEGER, 1115 .arg1_type = ARG_ANYTHING, 1116 }; 1117 1118 BPF_CALL_1(bpf_send_signal_thread, u32, sig) 1119 { 1120 return bpf_send_signal_common(sig, PIDTYPE_PID); 1121 } 1122 1123 static const struct bpf_func_proto bpf_send_signal_thread_proto = { 1124 .func = bpf_send_signal_thread, 1125 .gpl_only = false, 1126 .ret_type = RET_INTEGER, 1127 .arg1_type = ARG_ANYTHING, 1128 }; 1129 1130 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) 1131 { 1132 long len; 1133 char *p; 1134 1135 if (!sz) 1136 return 0; 1137 1138 p = d_path(path, buf, sz); 1139 if (IS_ERR(p)) { 1140 len = PTR_ERR(p); 1141 } else { 1142 len = buf + sz - p; 1143 memmove(buf, p, len); 1144 } 1145 1146 return len; 1147 } 1148 1149 BTF_SET_START(btf_allowlist_d_path) 1150 #ifdef CONFIG_SECURITY 1151 BTF_ID(func, security_file_permission) 1152 BTF_ID(func, security_inode_getattr) 1153 BTF_ID(func, security_file_open) 1154 #endif 1155 #ifdef CONFIG_SECURITY_PATH 1156 BTF_ID(func, security_path_truncate) 1157 #endif 1158 BTF_ID(func, vfs_truncate) 1159 BTF_ID(func, vfs_fallocate) 1160 BTF_ID(func, dentry_open) 1161 BTF_ID(func, vfs_getattr) 1162 BTF_ID(func, filp_close) 1163 BTF_SET_END(btf_allowlist_d_path) 1164 1165 static bool bpf_d_path_allowed(const struct bpf_prog *prog) 1166 { 1167 return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id); 1168 } 1169 1170 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) 1171 1172 static const struct bpf_func_proto bpf_d_path_proto = { 1173 .func = bpf_d_path, 1174 .gpl_only = false, 1175 .ret_type = RET_INTEGER, 1176 .arg1_type = ARG_PTR_TO_BTF_ID, 1177 .arg1_btf_id = &bpf_d_path_btf_ids[0], 1178 .arg2_type = ARG_PTR_TO_MEM, 1179 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1180 .allowed = bpf_d_path_allowed, 1181 }; 1182 1183 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ 1184 BTF_F_PTR_RAW | BTF_F_ZERO) 1185 1186 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 1187 u64 flags, const struct btf **btf, 1188 s32 *btf_id) 1189 { 1190 const struct btf_type *t; 1191 1192 if (unlikely(flags & ~(BTF_F_ALL))) 1193 return -EINVAL; 1194 1195 if (btf_ptr_size != sizeof(struct btf_ptr)) 1196 return -EINVAL; 1197 1198 *btf = bpf_get_btf_vmlinux(); 1199 1200 if (IS_ERR_OR_NULL(*btf)) 1201 return PTR_ERR(*btf); 1202 1203 if (ptr->type_id > 0) 1204 *btf_id = ptr->type_id; 1205 else 1206 return -EINVAL; 1207 1208 if (*btf_id > 0) 1209 t = btf_type_by_id(*btf, *btf_id); 1210 if (*btf_id <= 0 || !t) 1211 return -ENOENT; 1212 1213 return 0; 1214 } 1215 1216 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, 1217 u32, btf_ptr_size, u64, flags) 1218 { 1219 const struct btf *btf; 1220 
s32 btf_id; 1221 int ret; 1222 1223 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 1224 if (ret) 1225 return ret; 1226 1227 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, 1228 flags); 1229 } 1230 1231 const struct bpf_func_proto bpf_snprintf_btf_proto = { 1232 .func = bpf_snprintf_btf, 1233 .gpl_only = false, 1234 .ret_type = RET_INTEGER, 1235 .arg1_type = ARG_PTR_TO_MEM, 1236 .arg2_type = ARG_CONST_SIZE, 1237 .arg3_type = ARG_PTR_TO_MEM, 1238 .arg4_type = ARG_CONST_SIZE, 1239 .arg5_type = ARG_ANYTHING, 1240 }; 1241 1242 const struct bpf_func_proto * 1243 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1244 { 1245 switch (func_id) { 1246 case BPF_FUNC_map_lookup_elem: 1247 return &bpf_map_lookup_elem_proto; 1248 case BPF_FUNC_map_update_elem: 1249 return &bpf_map_update_elem_proto; 1250 case BPF_FUNC_map_delete_elem: 1251 return &bpf_map_delete_elem_proto; 1252 case BPF_FUNC_map_push_elem: 1253 return &bpf_map_push_elem_proto; 1254 case BPF_FUNC_map_pop_elem: 1255 return &bpf_map_pop_elem_proto; 1256 case BPF_FUNC_map_peek_elem: 1257 return &bpf_map_peek_elem_proto; 1258 case BPF_FUNC_ktime_get_ns: 1259 return &bpf_ktime_get_ns_proto; 1260 case BPF_FUNC_ktime_get_boot_ns: 1261 return &bpf_ktime_get_boot_ns_proto; 1262 case BPF_FUNC_tail_call: 1263 return &bpf_tail_call_proto; 1264 case BPF_FUNC_get_current_pid_tgid: 1265 return &bpf_get_current_pid_tgid_proto; 1266 case BPF_FUNC_get_current_task: 1267 return &bpf_get_current_task_proto; 1268 case BPF_FUNC_get_current_uid_gid: 1269 return &bpf_get_current_uid_gid_proto; 1270 case BPF_FUNC_get_current_comm: 1271 return &bpf_get_current_comm_proto; 1272 case BPF_FUNC_trace_printk: 1273 return bpf_get_trace_printk_proto(); 1274 case BPF_FUNC_get_smp_processor_id: 1275 return &bpf_get_smp_processor_id_proto; 1276 case BPF_FUNC_get_numa_node_id: 1277 return &bpf_get_numa_node_id_proto; 1278 case BPF_FUNC_perf_event_read: 1279 return &bpf_perf_event_read_proto; 1280 case BPF_FUNC_probe_write_user: 1281 return bpf_get_probe_write_proto(); 1282 case BPF_FUNC_current_task_under_cgroup: 1283 return &bpf_current_task_under_cgroup_proto; 1284 case BPF_FUNC_get_prandom_u32: 1285 return &bpf_get_prandom_u32_proto; 1286 case BPF_FUNC_probe_read_user: 1287 return &bpf_probe_read_user_proto; 1288 case BPF_FUNC_probe_read_kernel: 1289 return &bpf_probe_read_kernel_proto; 1290 case BPF_FUNC_probe_read_user_str: 1291 return &bpf_probe_read_user_str_proto; 1292 case BPF_FUNC_probe_read_kernel_str: 1293 return &bpf_probe_read_kernel_str_proto; 1294 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 1295 case BPF_FUNC_probe_read: 1296 return &bpf_probe_read_compat_proto; 1297 case BPF_FUNC_probe_read_str: 1298 return &bpf_probe_read_compat_str_proto; 1299 #endif 1300 #ifdef CONFIG_CGROUPS 1301 case BPF_FUNC_get_current_cgroup_id: 1302 return &bpf_get_current_cgroup_id_proto; 1303 #endif 1304 case BPF_FUNC_send_signal: 1305 return &bpf_send_signal_proto; 1306 case BPF_FUNC_send_signal_thread: 1307 return &bpf_send_signal_thread_proto; 1308 case BPF_FUNC_perf_event_read_value: 1309 return &bpf_perf_event_read_value_proto; 1310 case BPF_FUNC_get_ns_current_pid_tgid: 1311 return &bpf_get_ns_current_pid_tgid_proto; 1312 case BPF_FUNC_ringbuf_output: 1313 return &bpf_ringbuf_output_proto; 1314 case BPF_FUNC_ringbuf_reserve: 1315 return &bpf_ringbuf_reserve_proto; 1316 case BPF_FUNC_ringbuf_submit: 1317 return &bpf_ringbuf_submit_proto; 1318 case BPF_FUNC_ringbuf_discard: 1319 return 
&bpf_ringbuf_discard_proto; 1320 case BPF_FUNC_ringbuf_query: 1321 return &bpf_ringbuf_query_proto; 1322 case BPF_FUNC_jiffies64: 1323 return &bpf_jiffies64_proto; 1324 case BPF_FUNC_get_task_stack: 1325 return &bpf_get_task_stack_proto; 1326 case BPF_FUNC_copy_from_user: 1327 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; 1328 case BPF_FUNC_snprintf_btf: 1329 return &bpf_snprintf_btf_proto; 1330 case BPF_FUNC_bpf_per_cpu_ptr: 1331 return &bpf_per_cpu_ptr_proto; 1332 case BPF_FUNC_bpf_this_cpu_ptr: 1333 return &bpf_this_cpu_ptr_proto; 1334 default: 1335 return NULL; 1336 } 1337 } 1338 1339 static const struct bpf_func_proto * 1340 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1341 { 1342 switch (func_id) { 1343 case BPF_FUNC_perf_event_output: 1344 return &bpf_perf_event_output_proto; 1345 case BPF_FUNC_get_stackid: 1346 return &bpf_get_stackid_proto; 1347 case BPF_FUNC_get_stack: 1348 return &bpf_get_stack_proto; 1349 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 1350 case BPF_FUNC_override_return: 1351 return &bpf_override_return_proto; 1352 #endif 1353 default: 1354 return bpf_tracing_func_proto(func_id, prog); 1355 } 1356 } 1357 1358 /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 1359 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1360 const struct bpf_prog *prog, 1361 struct bpf_insn_access_aux *info) 1362 { 1363 if (off < 0 || off >= sizeof(struct pt_regs)) 1364 return false; 1365 if (type != BPF_READ) 1366 return false; 1367 if (off % size != 0) 1368 return false; 1369 /* 1370 * Assertion for 32 bit to make sure last 8 byte access 1371 * (BPF_DW) to the last 4 byte member is disallowed. 1372 */ 1373 if (off + size > sizeof(struct pt_regs)) 1374 return false; 1375 1376 return true; 1377 } 1378 1379 const struct bpf_verifier_ops kprobe_verifier_ops = { 1380 .get_func_proto = kprobe_prog_func_proto, 1381 .is_valid_access = kprobe_prog_is_valid_access, 1382 }; 1383 1384 const struct bpf_prog_ops kprobe_prog_ops = { 1385 }; 1386 1387 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1388 u64, flags, void *, data, u64, size) 1389 { 1390 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1391 1392 /* 1393 * r1 points to perf tracepoint buffer where first 8 bytes are hidden 1394 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1395 * from there and call the same bpf_perf_event_output() helper inline. 1396 */ 1397 return ____bpf_perf_event_output(regs, map, flags, data, size); 1398 } 1399 1400 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 1401 .func = bpf_perf_event_output_tp, 1402 .gpl_only = true, 1403 .ret_type = RET_INTEGER, 1404 .arg1_type = ARG_PTR_TO_CTX, 1405 .arg2_type = ARG_CONST_MAP_PTR, 1406 .arg3_type = ARG_ANYTHING, 1407 .arg4_type = ARG_PTR_TO_MEM, 1408 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1409 }; 1410 1411 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1412 u64, flags) 1413 { 1414 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1415 1416 /* 1417 * Same comment as in bpf_perf_event_output_tp(), only that this time 1418 * the other helper's function body cannot be inlined due to being 1419 * external, thus we need to call raw helper function. 
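 * (bpf_get_stackid() is declared near the top of this file with a raw
 * five-u64 signature precisely so it can be called directly like this.)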
1420 */ 1421 return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1422 flags, 0, 0); 1423 } 1424 1425 static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 1426 .func = bpf_get_stackid_tp, 1427 .gpl_only = true, 1428 .ret_type = RET_INTEGER, 1429 .arg1_type = ARG_PTR_TO_CTX, 1430 .arg2_type = ARG_CONST_MAP_PTR, 1431 .arg3_type = ARG_ANYTHING, 1432 }; 1433 1434 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1435 u64, flags) 1436 { 1437 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1438 1439 return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1440 (unsigned long) size, flags, 0); 1441 } 1442 1443 static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1444 .func = bpf_get_stack_tp, 1445 .gpl_only = true, 1446 .ret_type = RET_INTEGER, 1447 .arg1_type = ARG_PTR_TO_CTX, 1448 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1449 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1450 .arg4_type = ARG_ANYTHING, 1451 }; 1452 1453 static const struct bpf_func_proto * 1454 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1455 { 1456 switch (func_id) { 1457 case BPF_FUNC_perf_event_output: 1458 return &bpf_perf_event_output_proto_tp; 1459 case BPF_FUNC_get_stackid: 1460 return &bpf_get_stackid_proto_tp; 1461 case BPF_FUNC_get_stack: 1462 return &bpf_get_stack_proto_tp; 1463 default: 1464 return bpf_tracing_func_proto(func_id, prog); 1465 } 1466 } 1467 1468 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1469 const struct bpf_prog *prog, 1470 struct bpf_insn_access_aux *info) 1471 { 1472 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 1473 return false; 1474 if (type != BPF_READ) 1475 return false; 1476 if (off % size != 0) 1477 return false; 1478 1479 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 1480 return true; 1481 } 1482 1483 const struct bpf_verifier_ops tracepoint_verifier_ops = { 1484 .get_func_proto = tp_prog_func_proto, 1485 .is_valid_access = tp_prog_is_valid_access, 1486 }; 1487 1488 const struct bpf_prog_ops tracepoint_prog_ops = { 1489 }; 1490 1491 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1492 struct bpf_perf_event_value *, buf, u32, size) 1493 { 1494 int err = -EINVAL; 1495 1496 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1497 goto clear; 1498 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1499 &buf->running); 1500 if (unlikely(err)) 1501 goto clear; 1502 return 0; 1503 clear: 1504 memset(buf, 0, size); 1505 return err; 1506 } 1507 1508 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1509 .func = bpf_perf_prog_read_value, 1510 .gpl_only = true, 1511 .ret_type = RET_INTEGER, 1512 .arg1_type = ARG_PTR_TO_CTX, 1513 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1514 .arg3_type = ARG_CONST_SIZE, 1515 }; 1516 1517 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1518 void *, buf, u32, size, u64, flags) 1519 { 1520 #ifndef CONFIG_X86 1521 return -ENOENT; 1522 #else 1523 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1524 struct perf_branch_stack *br_stack = ctx->data->br_stack; 1525 u32 to_copy; 1526 1527 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1528 return -EINVAL; 1529 1530 if (unlikely(!br_stack)) 1531 return -EINVAL; 1532 1533 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1534 return br_stack->nr * br_entry_size; 1535 1536 if (!buf || (size % br_entry_size != 0)) 1537 return -EINVAL; 1538 1539 to_copy = min_t(u32, br_stack->nr * 
br_entry_size, size); 1540 memcpy(buf, br_stack->entries, to_copy); 1541 1542 return to_copy; 1543 #endif 1544 } 1545 1546 static const struct bpf_func_proto bpf_read_branch_records_proto = { 1547 .func = bpf_read_branch_records, 1548 .gpl_only = true, 1549 .ret_type = RET_INTEGER, 1550 .arg1_type = ARG_PTR_TO_CTX, 1551 .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 1552 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1553 .arg4_type = ARG_ANYTHING, 1554 }; 1555 1556 static const struct bpf_func_proto * 1557 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1558 { 1559 switch (func_id) { 1560 case BPF_FUNC_perf_event_output: 1561 return &bpf_perf_event_output_proto_tp; 1562 case BPF_FUNC_get_stackid: 1563 return &bpf_get_stackid_proto_pe; 1564 case BPF_FUNC_get_stack: 1565 return &bpf_get_stack_proto_pe; 1566 case BPF_FUNC_perf_prog_read_value: 1567 return &bpf_perf_prog_read_value_proto; 1568 case BPF_FUNC_read_branch_records: 1569 return &bpf_read_branch_records_proto; 1570 default: 1571 return bpf_tracing_func_proto(func_id, prog); 1572 } 1573 } 1574 1575 /* 1576 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1577 * to avoid potential recursive reuse issue when/if tracepoints are added 1578 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 1579 * 1580 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 1581 * in normal, irq, and nmi context. 1582 */ 1583 struct bpf_raw_tp_regs { 1584 struct pt_regs regs[3]; 1585 }; 1586 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 1587 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 1588 static struct pt_regs *get_bpf_raw_tp_regs(void) 1589 { 1590 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 1591 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 1592 1593 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { 1594 this_cpu_dec(bpf_raw_tp_nest_level); 1595 return ERR_PTR(-EBUSY); 1596 } 1597 1598 return &tp_regs->regs[nest_level - 1]; 1599 } 1600 1601 static void put_bpf_raw_tp_regs(void) 1602 { 1603 this_cpu_dec(bpf_raw_tp_nest_level); 1604 } 1605 1606 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1607 struct bpf_map *, map, u64, flags, void *, data, u64, size) 1608 { 1609 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1610 int ret; 1611 1612 if (IS_ERR(regs)) 1613 return PTR_ERR(regs); 1614 1615 perf_fetch_caller_regs(regs); 1616 ret = ____bpf_perf_event_output(regs, map, flags, data, size); 1617 1618 put_bpf_raw_tp_regs(); 1619 return ret; 1620 } 1621 1622 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1623 .func = bpf_perf_event_output_raw_tp, 1624 .gpl_only = true, 1625 .ret_type = RET_INTEGER, 1626 .arg1_type = ARG_PTR_TO_CTX, 1627 .arg2_type = ARG_CONST_MAP_PTR, 1628 .arg3_type = ARG_ANYTHING, 1629 .arg4_type = ARG_PTR_TO_MEM, 1630 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1631 }; 1632 1633 extern const struct bpf_func_proto bpf_skb_output_proto; 1634 extern const struct bpf_func_proto bpf_xdp_output_proto; 1635 1636 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1637 struct bpf_map *, map, u64, flags) 1638 { 1639 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1640 int ret; 1641 1642 if (IS_ERR(regs)) 1643 return PTR_ERR(regs); 1644 1645 perf_fetch_caller_regs(regs); 1646 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1647 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1648 flags, 0, 0); 1649 
put_bpf_raw_tp_regs(); 1650 return ret; 1651 } 1652 1653 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1654 .func = bpf_get_stackid_raw_tp, 1655 .gpl_only = true, 1656 .ret_type = RET_INTEGER, 1657 .arg1_type = ARG_PTR_TO_CTX, 1658 .arg2_type = ARG_CONST_MAP_PTR, 1659 .arg3_type = ARG_ANYTHING, 1660 }; 1661 1662 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1663 void *, buf, u32, size, u64, flags) 1664 { 1665 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1666 int ret; 1667 1668 if (IS_ERR(regs)) 1669 return PTR_ERR(regs); 1670 1671 perf_fetch_caller_regs(regs); 1672 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1673 (unsigned long) size, flags, 0); 1674 put_bpf_raw_tp_regs(); 1675 return ret; 1676 } 1677 1678 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1679 .func = bpf_get_stack_raw_tp, 1680 .gpl_only = true, 1681 .ret_type = RET_INTEGER, 1682 .arg1_type = ARG_PTR_TO_CTX, 1683 .arg2_type = ARG_PTR_TO_MEM, 1684 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1685 .arg4_type = ARG_ANYTHING, 1686 }; 1687 1688 static const struct bpf_func_proto * 1689 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1690 { 1691 switch (func_id) { 1692 case BPF_FUNC_perf_event_output: 1693 return &bpf_perf_event_output_proto_raw_tp; 1694 case BPF_FUNC_get_stackid: 1695 return &bpf_get_stackid_proto_raw_tp; 1696 case BPF_FUNC_get_stack: 1697 return &bpf_get_stack_proto_raw_tp; 1698 default: 1699 return bpf_tracing_func_proto(func_id, prog); 1700 } 1701 } 1702 1703 const struct bpf_func_proto * 1704 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1705 { 1706 switch (func_id) { 1707 #ifdef CONFIG_NET 1708 case BPF_FUNC_skb_output: 1709 return &bpf_skb_output_proto; 1710 case BPF_FUNC_xdp_output: 1711 return &bpf_xdp_output_proto; 1712 case BPF_FUNC_skc_to_tcp6_sock: 1713 return &bpf_skc_to_tcp6_sock_proto; 1714 case BPF_FUNC_skc_to_tcp_sock: 1715 return &bpf_skc_to_tcp_sock_proto; 1716 case BPF_FUNC_skc_to_tcp_timewait_sock: 1717 return &bpf_skc_to_tcp_timewait_sock_proto; 1718 case BPF_FUNC_skc_to_tcp_request_sock: 1719 return &bpf_skc_to_tcp_request_sock_proto; 1720 case BPF_FUNC_skc_to_udp6_sock: 1721 return &bpf_skc_to_udp6_sock_proto; 1722 #endif 1723 case BPF_FUNC_seq_printf: 1724 return prog->expected_attach_type == BPF_TRACE_ITER ? 1725 &bpf_seq_printf_proto : 1726 NULL; 1727 case BPF_FUNC_seq_write: 1728 return prog->expected_attach_type == BPF_TRACE_ITER ? 1729 &bpf_seq_write_proto : 1730 NULL; 1731 case BPF_FUNC_seq_printf_btf: 1732 return prog->expected_attach_type == BPF_TRACE_ITER ? 
1733 &bpf_seq_printf_btf_proto : 1734 NULL; 1735 case BPF_FUNC_d_path: 1736 return &bpf_d_path_proto; 1737 default: 1738 return raw_tp_prog_func_proto(func_id, prog); 1739 } 1740 } 1741 1742 static bool raw_tp_prog_is_valid_access(int off, int size, 1743 enum bpf_access_type type, 1744 const struct bpf_prog *prog, 1745 struct bpf_insn_access_aux *info) 1746 { 1747 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1748 return false; 1749 if (type != BPF_READ) 1750 return false; 1751 if (off % size != 0) 1752 return false; 1753 return true; 1754 } 1755 1756 static bool tracing_prog_is_valid_access(int off, int size, 1757 enum bpf_access_type type, 1758 const struct bpf_prog *prog, 1759 struct bpf_insn_access_aux *info) 1760 { 1761 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1762 return false; 1763 if (type != BPF_READ) 1764 return false; 1765 if (off % size != 0) 1766 return false; 1767 return btf_ctx_access(off, size, type, prog, info); 1768 } 1769 1770 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 1771 const union bpf_attr *kattr, 1772 union bpf_attr __user *uattr) 1773 { 1774 return -ENOTSUPP; 1775 } 1776 1777 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 1778 .get_func_proto = raw_tp_prog_func_proto, 1779 .is_valid_access = raw_tp_prog_is_valid_access, 1780 }; 1781 1782 const struct bpf_prog_ops raw_tracepoint_prog_ops = { 1783 #ifdef CONFIG_NET 1784 .test_run = bpf_prog_test_run_raw_tp, 1785 #endif 1786 }; 1787 1788 const struct bpf_verifier_ops tracing_verifier_ops = { 1789 .get_func_proto = tracing_prog_func_proto, 1790 .is_valid_access = tracing_prog_is_valid_access, 1791 }; 1792 1793 const struct bpf_prog_ops tracing_prog_ops = { 1794 .test_run = bpf_prog_test_run_tracing, 1795 }; 1796 1797 static bool raw_tp_writable_prog_is_valid_access(int off, int size, 1798 enum bpf_access_type type, 1799 const struct bpf_prog *prog, 1800 struct bpf_insn_access_aux *info) 1801 { 1802 if (off == 0) { 1803 if (size != sizeof(u64) || type != BPF_READ) 1804 return false; 1805 info->reg_type = PTR_TO_TP_BUFFER; 1806 } 1807 return raw_tp_prog_is_valid_access(off, size, type, prog, info); 1808 } 1809 1810 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 1811 .get_func_proto = raw_tp_prog_func_proto, 1812 .is_valid_access = raw_tp_writable_prog_is_valid_access, 1813 }; 1814 1815 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 1816 }; 1817 1818 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1819 const struct bpf_prog *prog, 1820 struct bpf_insn_access_aux *info) 1821 { 1822 const int size_u64 = sizeof(u64); 1823 1824 if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 1825 return false; 1826 if (type != BPF_READ) 1827 return false; 1828 if (off % size != 0) { 1829 if (sizeof(unsigned long) != 4) 1830 return false; 1831 if (size != 8) 1832 return false; 1833 if (off % size != 4) 1834 return false; 1835 } 1836 1837 switch (off) { 1838 case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 1839 bpf_ctx_record_field_size(info, size_u64); 1840 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1841 return false; 1842 break; 1843 case bpf_ctx_range(struct bpf_perf_event_data, addr): 1844 bpf_ctx_record_field_size(info, size_u64); 1845 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1846 return false; 1847 break; 1848 default: 1849 if (size != sizeof(long)) 1850 return false; 1851 } 1852 1853 return true; 1854 } 1855 1856 static u32 pe_prog_convert_ctx_access(enum 
bpf_access_type type, 1857 const struct bpf_insn *si, 1858 struct bpf_insn *insn_buf, 1859 struct bpf_prog *prog, u32 *target_size) 1860 { 1861 struct bpf_insn *insn = insn_buf; 1862 1863 switch (si->off) { 1864 case offsetof(struct bpf_perf_event_data, sample_period): 1865 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1866 data), si->dst_reg, si->src_reg, 1867 offsetof(struct bpf_perf_event_data_kern, data)); 1868 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1869 bpf_target_off(struct perf_sample_data, period, 8, 1870 target_size)); 1871 break; 1872 case offsetof(struct bpf_perf_event_data, addr): 1873 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1874 data), si->dst_reg, si->src_reg, 1875 offsetof(struct bpf_perf_event_data_kern, data)); 1876 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1877 bpf_target_off(struct perf_sample_data, addr, 8, 1878 target_size)); 1879 break; 1880 default: 1881 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1882 regs), si->dst_reg, si->src_reg, 1883 offsetof(struct bpf_perf_event_data_kern, regs)); 1884 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 1885 si->off); 1886 break; 1887 } 1888 1889 return insn - insn_buf; 1890 } 1891 1892 const struct bpf_verifier_ops perf_event_verifier_ops = { 1893 .get_func_proto = pe_prog_func_proto, 1894 .is_valid_access = pe_prog_is_valid_access, 1895 .convert_ctx_access = pe_prog_convert_ctx_access, 1896 }; 1897 1898 const struct bpf_prog_ops perf_event_prog_ops = { 1899 }; 1900 1901 static DEFINE_MUTEX(bpf_event_mutex); 1902 1903 #define BPF_TRACE_MAX_PROGS 64 1904 1905 int perf_event_attach_bpf_prog(struct perf_event *event, 1906 struct bpf_prog *prog) 1907 { 1908 struct bpf_prog_array *old_array; 1909 struct bpf_prog_array *new_array; 1910 int ret = -EEXIST; 1911 1912 /* 1913 * Kprobe override only works if they are on the function entry, 1914 * and only if they are on the opt-in list. 
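 * (The opt-in list is the error injection list, i.e. functions
 * annotated with ALLOW_ERROR_INJECTION(); see
 * trace_kprobe_error_injectable().)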
1915 */ 1916 if (prog->kprobe_override && 1917 (!trace_kprobe_on_func_entry(event->tp_event) || 1918 !trace_kprobe_error_injectable(event->tp_event))) 1919 return -EINVAL; 1920 1921 mutex_lock(&bpf_event_mutex); 1922 1923 if (event->prog) 1924 goto unlock; 1925 1926 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1927 if (old_array && 1928 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 1929 ret = -E2BIG; 1930 goto unlock; 1931 } 1932 1933 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); 1934 if (ret < 0) 1935 goto unlock; 1936 1937 /* set the new array to event->tp_event and set event->prog */ 1938 event->prog = prog; 1939 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1940 bpf_prog_array_free(old_array); 1941 1942 unlock: 1943 mutex_unlock(&bpf_event_mutex); 1944 return ret; 1945 } 1946 1947 void perf_event_detach_bpf_prog(struct perf_event *event) 1948 { 1949 struct bpf_prog_array *old_array; 1950 struct bpf_prog_array *new_array; 1951 int ret; 1952 1953 mutex_lock(&bpf_event_mutex); 1954 1955 if (!event->prog) 1956 goto unlock; 1957 1958 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1959 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); 1960 if (ret == -ENOENT) 1961 goto unlock; 1962 if (ret < 0) { 1963 bpf_prog_array_delete_safe(old_array, event->prog); 1964 } else { 1965 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1966 bpf_prog_array_free(old_array); 1967 } 1968 1969 bpf_prog_put(event->prog); 1970 event->prog = NULL; 1971 1972 unlock: 1973 mutex_unlock(&bpf_event_mutex); 1974 } 1975 1976 int perf_event_query_prog_array(struct perf_event *event, void __user *info) 1977 { 1978 struct perf_event_query_bpf __user *uquery = info; 1979 struct perf_event_query_bpf query = {}; 1980 struct bpf_prog_array *progs; 1981 u32 *ids, prog_cnt, ids_len; 1982 int ret; 1983 1984 if (!perfmon_capable()) 1985 return -EPERM; 1986 if (event->attr.type != PERF_TYPE_TRACEPOINT) 1987 return -EINVAL; 1988 if (copy_from_user(&query, uquery, sizeof(query))) 1989 return -EFAULT; 1990 1991 ids_len = query.ids_len; 1992 if (ids_len > BPF_TRACE_MAX_PROGS) 1993 return -E2BIG; 1994 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 1995 if (!ids) 1996 return -ENOMEM; 1997 /* 1998 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 1999 * is required when user only wants to check for uquery->prog_cnt. 2000 * There is no need to check for it since the case is handled 2001 * gracefully in bpf_prog_array_copy_info. 
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}
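/*
 * The REPEAT()/BPF_TRACE_DEFN_x() machinery below stamps out one
 * bpf_trace_run<N>() helper per argument count. As a worked example,
 * BPF_TRACE_DEFN_x(2) expands roughly to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */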
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
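/*
 * Usage sketch (user space, illustrative only; not part of the kernel code
 * here): bpf_probe_register() and bpf_probe_unregister() above are reached
 * from the BPF_RAW_TRACEPOINT_OPEN syscall command, roughly:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	int fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 *
 * prog_fd is assumed to be a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program;
 * closing fd detaches the program. Error handling is omitted.
 */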
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */
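/*
 * Usage sketch (user space, illustrative only; not part of the kernel code
 * here): bpf_get_perf_event_info() earlier in this file serves the
 * BPF_TASK_FD_QUERY command, which lets user space ask what a perf-event fd
 * owned by some task is attached to, roughly:
 *
 *	char name[256];
 *	union bpf_attr attr = {};
 *
 *	attr.task_fd_query.pid = target_pid;
 *	attr.task_fd_query.fd = target_perf_fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)name;
 *	attr.task_fd_query.buf_len = sizeof(name);
 *	if (!syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
 *		printf("prog %u on %s\n", attr.task_fd_query.prog_id, name);
 *
 * target_pid and target_perf_fd are assumptions; error handling is omitted.
 */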