1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016 Facebook 4 */ 5 #include <linux/kernel.h> 6 #include <linux/types.h> 7 #include <linux/slab.h> 8 #include <linux/bpf.h> 9 #include <linux/bpf_perf_event.h> 10 #include <linux/btf.h> 11 #include <linux/filter.h> 12 #include <linux/uaccess.h> 13 #include <linux/ctype.h> 14 #include <linux/kprobes.h> 15 #include <linux/spinlock.h> 16 #include <linux/syscalls.h> 17 #include <linux/error-injection.h> 18 #include <linux/btf_ids.h> 19 #include <linux/bpf_lsm.h> 20 21 #include <net/bpf_sk_storage.h> 22 23 #include <uapi/linux/bpf.h> 24 #include <uapi/linux/btf.h> 25 26 #include <asm/tlb.h> 27 28 #include "trace_probe.h" 29 #include "trace.h" 30 31 #define CREATE_TRACE_POINTS 32 #include "bpf_trace.h" 33 34 #define bpf_event_rcu_dereference(p) \ 35 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) 36 37 #ifdef CONFIG_MODULES 38 struct bpf_trace_module { 39 struct module *module; 40 struct list_head list; 41 }; 42 43 static LIST_HEAD(bpf_trace_modules); 44 static DEFINE_MUTEX(bpf_module_mutex); 45 46 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 47 { 48 struct bpf_raw_event_map *btp, *ret = NULL; 49 struct bpf_trace_module *btm; 50 unsigned int i; 51 52 mutex_lock(&bpf_module_mutex); 53 list_for_each_entry(btm, &bpf_trace_modules, list) { 54 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { 55 btp = &btm->module->bpf_raw_events[i]; 56 if (!strcmp(btp->tp->name, name)) { 57 if (try_module_get(btm->module)) 58 ret = btp; 59 goto out; 60 } 61 } 62 } 63 out: 64 mutex_unlock(&bpf_module_mutex); 65 return ret; 66 } 67 #else 68 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 69 { 70 return NULL; 71 } 72 #endif /* CONFIG_MODULES */ 73 74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 76 77 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 78 u64 flags, const struct btf **btf, 79 s32 *btf_id); 80 81 /** 82 * trace_call_bpf - invoke BPF program 83 * @call: tracepoint event 84 * @ctx: opaque context pointer 85 * 86 * kprobe handlers execute BPF programs via this helper. 87 * Can be used from static tracepoints in the future. 88 * 89 * Return: BPF programs always return an integer which is interpreted by 90 * kprobe handler as: 91 * 0 - return from kprobe (event is filtered out) 92 * 1 - store kprobe event into ring buffer 93 * Other values are reserved and currently alias to 1 94 */ 95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) 96 { 97 unsigned int ret; 98 99 if (in_nmi()) /* not supported yet */ 100 return 1; 101 102 cant_sleep(); 103 104 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 105 /* 106 * since some bpf program is already running on this cpu, 107 * don't call into another bpf program (same or different) 108 * and don't send kprobe event into ring-buffer, 109 * so return zero here 110 */ 111 ret = 0; 112 goto out; 113 } 114 115 /* 116 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock 117 * to all call sites, we did a bpf_prog_array_valid() there to check 118 * whether call->prog_array is empty or not, which is 119 * a heurisitc to speed up execution. 120 * 121 * If bpf_prog_array_valid() fetched prog_array was 122 * non-NULL, we go into trace_call_bpf() and do the actual 123 * proper rcu_dereference() under RCU lock. 124 * If it turns out that prog_array is NULL then, we bail out. 125 * For the opposite, if the bpf_prog_array_valid() fetched pointer 126 * was NULL, you'll skip the prog_array with the risk of missing 127 * out of events when it was updated in between this and the 128 * rcu_dereference() which is accepted risk. 129 */ 130 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN); 131 132 out: 133 __this_cpu_dec(bpf_prog_active); 134 135 return ret; 136 } 137 138 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 139 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) 140 { 141 regs_set_return_value(regs, rc); 142 override_function_with_return(regs); 143 return 0; 144 } 145 146 static const struct bpf_func_proto bpf_override_return_proto = { 147 .func = bpf_override_return, 148 .gpl_only = true, 149 .ret_type = RET_INTEGER, 150 .arg1_type = ARG_PTR_TO_CTX, 151 .arg2_type = ARG_ANYTHING, 152 }; 153 #endif 154 155 static __always_inline int 156 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) 157 { 158 int ret; 159 160 ret = copy_from_user_nofault(dst, unsafe_ptr, size); 161 if (unlikely(ret < 0)) 162 memset(dst, 0, size); 163 return ret; 164 } 165 166 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, 167 const void __user *, unsafe_ptr) 168 { 169 return bpf_probe_read_user_common(dst, size, unsafe_ptr); 170 } 171 172 const struct bpf_func_proto bpf_probe_read_user_proto = { 173 .func = bpf_probe_read_user, 174 .gpl_only = true, 175 .ret_type = RET_INTEGER, 176 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 177 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 178 .arg3_type = ARG_ANYTHING, 179 }; 180 181 static __always_inline int 182 bpf_probe_read_user_str_common(void *dst, u32 size, 183 const void __user *unsafe_ptr) 184 { 185 int ret; 186 187 /* 188 * NB: We rely on strncpy_from_user() not copying junk past the NUL 189 * terminator into `dst`. 190 * 191 * strncpy_from_user() does long-sized strides in the fast path. If the 192 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`, 193 * then there could be junk after the NUL in `dst`. If user takes `dst` 194 * and keys a hash map with it, then semantically identical strings can 195 * occupy multiple entries in the map. 196 */ 197 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); 198 if (unlikely(ret < 0)) 199 memset(dst, 0, size); 200 return ret; 201 } 202 203 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, 204 const void __user *, unsafe_ptr) 205 { 206 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); 207 } 208 209 const struct bpf_func_proto bpf_probe_read_user_str_proto = { 210 .func = bpf_probe_read_user_str, 211 .gpl_only = true, 212 .ret_type = RET_INTEGER, 213 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 214 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 215 .arg3_type = ARG_ANYTHING, 216 }; 217 218 static __always_inline int 219 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) 220 { 221 int ret = security_locked_down(LOCKDOWN_BPF_READ); 222 223 if (unlikely(ret < 0)) 224 goto fail; 225 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); 226 if (unlikely(ret < 0)) 227 goto fail; 228 return ret; 229 fail: 230 memset(dst, 0, size); 231 return ret; 232 } 233 234 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, 235 const void *, unsafe_ptr) 236 { 237 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 238 } 239 240 const struct bpf_func_proto bpf_probe_read_kernel_proto = { 241 .func = bpf_probe_read_kernel, 242 .gpl_only = true, 243 .ret_type = RET_INTEGER, 244 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 245 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 246 .arg3_type = ARG_ANYTHING, 247 }; 248 249 static __always_inline int 250 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) 251 { 252 int ret = security_locked_down(LOCKDOWN_BPF_READ); 253 254 if (unlikely(ret < 0)) 255 goto fail; 256 257 /* 258 * The strncpy_from_kernel_nofault() call will likely not fill the 259 * entire buffer, but that's okay in this circumstance as we're probing 260 * arbitrary memory anyway similar to bpf_probe_read_*() and might 261 * as well probe the stack. Thus, memory is explicitly cleared 262 * only in error case, so that improper users ignoring return 263 * code altogether don't copy garbage; otherwise length of string 264 * is returned that can be used for bpf_perf_event_output() et al. 265 */ 266 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); 267 if (unlikely(ret < 0)) 268 goto fail; 269 270 return ret; 271 fail: 272 memset(dst, 0, size); 273 return ret; 274 } 275 276 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, 277 const void *, unsafe_ptr) 278 { 279 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 280 } 281 282 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { 283 .func = bpf_probe_read_kernel_str, 284 .gpl_only = true, 285 .ret_type = RET_INTEGER, 286 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 287 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 288 .arg3_type = ARG_ANYTHING, 289 }; 290 291 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 292 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, 293 const void *, unsafe_ptr) 294 { 295 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 296 return bpf_probe_read_user_common(dst, size, 297 (__force void __user *)unsafe_ptr); 298 } 299 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 300 } 301 302 static const struct bpf_func_proto bpf_probe_read_compat_proto = { 303 .func = bpf_probe_read_compat, 304 .gpl_only = true, 305 .ret_type = RET_INTEGER, 306 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 307 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 308 .arg3_type = ARG_ANYTHING, 309 }; 310 311 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, 312 const void *, unsafe_ptr) 313 { 314 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 315 return bpf_probe_read_user_str_common(dst, size, 316 (__force void __user *)unsafe_ptr); 317 } 318 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 319 } 320 321 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { 322 .func = bpf_probe_read_compat_str, 323 .gpl_only = true, 324 .ret_type = RET_INTEGER, 325 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 326 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 327 .arg3_type = ARG_ANYTHING, 328 }; 329 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ 330 331 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, 332 u32, size) 333 { 334 /* 335 * Ensure we're in user context which is safe for the helper to 336 * run. This helper has no business in a kthread. 337 * 338 * access_ok() should prevent writing to non-user memory, but in 339 * some situations (nommu, temporary switch, etc) access_ok() does 340 * not provide enough validation, hence the check on KERNEL_DS. 341 * 342 * nmi_uaccess_okay() ensures the probe is not run in an interim 343 * state, when the task or mm are switched. This is specifically 344 * required to prevent the use of temporary mm. 345 */ 346 347 if (unlikely(in_interrupt() || 348 current->flags & (PF_KTHREAD | PF_EXITING))) 349 return -EPERM; 350 if (unlikely(uaccess_kernel())) 351 return -EPERM; 352 if (unlikely(!nmi_uaccess_okay())) 353 return -EPERM; 354 355 return copy_to_user_nofault(unsafe_ptr, src, size); 356 } 357 358 static const struct bpf_func_proto bpf_probe_write_user_proto = { 359 .func = bpf_probe_write_user, 360 .gpl_only = true, 361 .ret_type = RET_INTEGER, 362 .arg1_type = ARG_ANYTHING, 363 .arg2_type = ARG_PTR_TO_MEM, 364 .arg3_type = ARG_CONST_SIZE, 365 }; 366 367 static const struct bpf_func_proto *bpf_get_probe_write_proto(void) 368 { 369 if (!capable(CAP_SYS_ADMIN)) 370 return NULL; 371 372 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", 373 current->comm, task_pid_nr(current)); 374 375 return &bpf_probe_write_user_proto; 376 } 377 378 static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 379 size_t bufsz) 380 { 381 void __user *user_ptr = (__force void __user *)unsafe_ptr; 382 383 buf[0] = 0; 384 385 switch (fmt_ptype) { 386 case 's': 387 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 388 if ((unsigned long)unsafe_ptr < TASK_SIZE) { 389 strncpy_from_user_nofault(buf, user_ptr, bufsz); 390 break; 391 } 392 fallthrough; 393 #endif 394 case 'k': 395 strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 396 break; 397 case 'u': 398 strncpy_from_user_nofault(buf, user_ptr, bufsz); 399 break; 400 } 401 } 402 403 static DEFINE_RAW_SPINLOCK(trace_printk_lock); 404 405 #define BPF_TRACE_PRINTK_SIZE 1024 406 407 static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...) 408 { 409 static char buf[BPF_TRACE_PRINTK_SIZE]; 410 unsigned long flags; 411 va_list ap; 412 int ret; 413 414 raw_spin_lock_irqsave(&trace_printk_lock, flags); 415 va_start(ap, fmt); 416 ret = vsnprintf(buf, sizeof(buf), fmt, ap); 417 va_end(ap); 418 /* vsnprintf() will not append null for zero-length strings */ 419 if (ret == 0) 420 buf[0] = '\0'; 421 trace_bpf_trace_printk(buf); 422 raw_spin_unlock_irqrestore(&trace_printk_lock, flags); 423 424 return ret; 425 } 426 427 /* 428 * Only limited trace_printk() conversion specifiers allowed: 429 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s 430 */ 431 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, 432 u64, arg2, u64, arg3) 433 { 434 int i, mod[3] = {}, fmt_cnt = 0; 435 char buf[64], fmt_ptype; 436 void *unsafe_ptr = NULL; 437 bool str_seen = false; 438 439 /* 440 * bpf_check()->check_func_arg()->check_stack_boundary() 441 * guarantees that fmt points to bpf program stack, 442 * fmt_size bytes of it were initialized and fmt_size > 0 443 */ 444 if (fmt[--fmt_size] != 0) 445 return -EINVAL; 446 447 /* check format string for allowed specifiers */ 448 for (i = 0; i < fmt_size; i++) { 449 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) 450 return -EINVAL; 451 452 if (fmt[i] != '%') 453 continue; 454 455 if (fmt_cnt >= 3) 456 return -EINVAL; 457 458 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ 459 i++; 460 if (fmt[i] == 'l') { 461 mod[fmt_cnt]++; 462 i++; 463 } else if (fmt[i] == 'p') { 464 mod[fmt_cnt]++; 465 if ((fmt[i + 1] == 'k' || 466 fmt[i + 1] == 'u') && 467 fmt[i + 2] == 's') { 468 fmt_ptype = fmt[i + 1]; 469 i += 2; 470 goto fmt_str; 471 } 472 473 if (fmt[i + 1] == 'B') { 474 i++; 475 goto fmt_next; 476 } 477 478 /* disallow any further format extensions */ 479 if (fmt[i + 1] != 0 && 480 !isspace(fmt[i + 1]) && 481 !ispunct(fmt[i + 1])) 482 return -EINVAL; 483 484 goto fmt_next; 485 } else if (fmt[i] == 's') { 486 mod[fmt_cnt]++; 487 fmt_ptype = fmt[i]; 488 fmt_str: 489 if (str_seen) 490 /* allow only one '%s' per fmt string */ 491 return -EINVAL; 492 str_seen = true; 493 494 if (fmt[i + 1] != 0 && 495 !isspace(fmt[i + 1]) && 496 !ispunct(fmt[i + 1])) 497 return -EINVAL; 498 499 switch (fmt_cnt) { 500 case 0: 501 unsafe_ptr = (void *)(long)arg1; 502 arg1 = (long)buf; 503 break; 504 case 1: 505 unsafe_ptr = (void *)(long)arg2; 506 arg2 = (long)buf; 507 break; 508 case 2: 509 unsafe_ptr = (void *)(long)arg3; 510 arg3 = (long)buf; 511 break; 512 } 513 514 bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype, 515 sizeof(buf)); 516 goto fmt_next; 517 } 518 519 if (fmt[i] == 'l') { 520 mod[fmt_cnt]++; 521 i++; 522 } 523 524 if (fmt[i] != 'i' && fmt[i] != 'd' && 525 fmt[i] != 'u' && fmt[i] != 'x') 526 return -EINVAL; 527 fmt_next: 528 fmt_cnt++; 529 } 530 531 /* Horrid workaround for getting va_list handling working with different 532 * argument type combinations generically for 32 and 64 bit archs. 533 */ 534 #define __BPF_TP_EMIT() __BPF_ARG3_TP() 535 #define __BPF_TP(...) \ 536 bpf_do_trace_printk(fmt, ##__VA_ARGS__) 537 538 #define __BPF_ARG1_TP(...) \ 539 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ 540 ? __BPF_TP(arg1, ##__VA_ARGS__) \ 541 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ 542 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ 543 : __BPF_TP((u32)arg1, ##__VA_ARGS__))) 544 545 #define __BPF_ARG2_TP(...) \ 546 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ 547 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ 548 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ 549 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ 550 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) 551 552 #define __BPF_ARG3_TP(...) \ 553 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ 554 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ 555 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ 556 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ 557 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) 558 559 return __BPF_TP_EMIT(); 560 } 561 562 static const struct bpf_func_proto bpf_trace_printk_proto = { 563 .func = bpf_trace_printk, 564 .gpl_only = true, 565 .ret_type = RET_INTEGER, 566 .arg1_type = ARG_PTR_TO_MEM, 567 .arg2_type = ARG_CONST_SIZE, 568 }; 569 570 const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 571 { 572 /* 573 * This program might be calling bpf_trace_printk, 574 * so enable the associated bpf_trace/bpf_trace_printk event. 575 * Repeat this each time as it is possible a user has 576 * disabled bpf_trace_printk events. By loading a program 577 * calling bpf_trace_printk() however the user has expressed 578 * the intent to see such events. 579 */ 580 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 581 pr_warn_ratelimited("could not enable bpf_trace_printk events"); 582 583 return &bpf_trace_printk_proto; 584 } 585 586 #define MAX_SEQ_PRINTF_VARARGS 12 587 #define MAX_SEQ_PRINTF_MAX_MEMCPY 6 588 #define MAX_SEQ_PRINTF_STR_LEN 128 589 590 struct bpf_seq_printf_buf { 591 char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN]; 592 }; 593 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf); 594 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used); 595 596 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, 597 const void *, data, u32, data_len) 598 { 599 int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0; 600 int i, buf_used, copy_size, num_args; 601 u64 params[MAX_SEQ_PRINTF_VARARGS]; 602 struct bpf_seq_printf_buf *bufs; 603 const u64 *args = data; 604 605 buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used); 606 if (WARN_ON_ONCE(buf_used > 1)) { 607 err = -EBUSY; 608 goto out; 609 } 610 611 bufs = this_cpu_ptr(&bpf_seq_printf_buf); 612 613 /* 614 * bpf_check()->check_func_arg()->check_stack_boundary() 615 * guarantees that fmt points to bpf program stack, 616 * fmt_size bytes of it were initialized and fmt_size > 0 617 */ 618 if (fmt[--fmt_size] != 0) 619 goto out; 620 621 if (data_len & 7) 622 goto out; 623 624 for (i = 0; i < fmt_size; i++) { 625 if (fmt[i] == '%') { 626 if (fmt[i + 1] == '%') 627 i++; 628 else if (!data || !data_len) 629 goto out; 630 } 631 } 632 633 num_args = data_len / 8; 634 635 /* check format string for allowed specifiers */ 636 for (i = 0; i < fmt_size; i++) { 637 /* only printable ascii for now. */ 638 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 639 err = -EINVAL; 640 goto out; 641 } 642 643 if (fmt[i] != '%') 644 continue; 645 646 if (fmt[i + 1] == '%') { 647 i++; 648 continue; 649 } 650 651 if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) { 652 err = -E2BIG; 653 goto out; 654 } 655 656 if (fmt_cnt >= num_args) { 657 err = -EINVAL; 658 goto out; 659 } 660 661 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ 662 i++; 663 664 /* skip optional "[0 +-][num]" width formating field */ 665 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 666 fmt[i] == ' ') 667 i++; 668 if (fmt[i] >= '1' && fmt[i] <= '9') { 669 i++; 670 while (fmt[i] >= '0' && fmt[i] <= '9') 671 i++; 672 } 673 674 if (fmt[i] == 's') { 675 void *unsafe_ptr; 676 677 /* try our best to copy */ 678 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { 679 err = -E2BIG; 680 goto out; 681 } 682 683 unsafe_ptr = (void *)(long)args[fmt_cnt]; 684 err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt], 685 unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN); 686 if (err < 0) 687 bufs->buf[memcpy_cnt][0] = '\0'; 688 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; 689 690 fmt_cnt++; 691 memcpy_cnt++; 692 continue; 693 } 694 695 if (fmt[i] == 'p') { 696 if (fmt[i + 1] == 0 || 697 fmt[i + 1] == 'K' || 698 fmt[i + 1] == 'x' || 699 fmt[i + 1] == 'B') { 700 /* just kernel pointers */ 701 params[fmt_cnt] = args[fmt_cnt]; 702 fmt_cnt++; 703 continue; 704 } 705 706 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 707 if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') { 708 err = -EINVAL; 709 goto out; 710 } 711 if (fmt[i + 2] != '4' && fmt[i + 2] != '6') { 712 err = -EINVAL; 713 goto out; 714 } 715 716 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { 717 err = -E2BIG; 718 goto out; 719 } 720 721 722 copy_size = (fmt[i + 2] == '4') ? 4 : 16; 723 724 err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt], 725 (void *) (long) args[fmt_cnt], 726 copy_size); 727 if (err < 0) 728 memset(bufs->buf[memcpy_cnt], 0, copy_size); 729 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; 730 731 i += 2; 732 fmt_cnt++; 733 memcpy_cnt++; 734 continue; 735 } 736 737 if (fmt[i] == 'l') { 738 i++; 739 if (fmt[i] == 'l') 740 i++; 741 } 742 743 if (fmt[i] != 'i' && fmt[i] != 'd' && 744 fmt[i] != 'u' && fmt[i] != 'x' && 745 fmt[i] != 'X') { 746 err = -EINVAL; 747 goto out; 748 } 749 750 params[fmt_cnt] = args[fmt_cnt]; 751 fmt_cnt++; 752 } 753 754 /* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give 755 * all of them to seq_printf(). 756 */ 757 seq_printf(m, fmt, params[0], params[1], params[2], params[3], 758 params[4], params[5], params[6], params[7], params[8], 759 params[9], params[10], params[11]); 760 761 err = seq_has_overflowed(m) ? -EOVERFLOW : 0; 762 out: 763 this_cpu_dec(bpf_seq_printf_buf_used); 764 return err; 765 } 766 767 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) 768 769 static const struct bpf_func_proto bpf_seq_printf_proto = { 770 .func = bpf_seq_printf, 771 .gpl_only = true, 772 .ret_type = RET_INTEGER, 773 .arg1_type = ARG_PTR_TO_BTF_ID, 774 .arg1_btf_id = &btf_seq_file_ids[0], 775 .arg2_type = ARG_PTR_TO_MEM, 776 .arg3_type = ARG_CONST_SIZE, 777 .arg4_type = ARG_PTR_TO_MEM_OR_NULL, 778 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 779 }; 780 781 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) 782 { 783 return seq_write(m, data, len) ? -EOVERFLOW : 0; 784 } 785 786 static const struct bpf_func_proto bpf_seq_write_proto = { 787 .func = bpf_seq_write, 788 .gpl_only = true, 789 .ret_type = RET_INTEGER, 790 .arg1_type = ARG_PTR_TO_BTF_ID, 791 .arg1_btf_id = &btf_seq_file_ids[0], 792 .arg2_type = ARG_PTR_TO_MEM, 793 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 794 }; 795 796 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, 797 u32, btf_ptr_size, u64, flags) 798 { 799 const struct btf *btf; 800 s32 btf_id; 801 int ret; 802 803 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 804 if (ret) 805 return ret; 806 807 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); 808 } 809 810 static const struct bpf_func_proto bpf_seq_printf_btf_proto = { 811 .func = bpf_seq_printf_btf, 812 .gpl_only = true, 813 .ret_type = RET_INTEGER, 814 .arg1_type = ARG_PTR_TO_BTF_ID, 815 .arg1_btf_id = &btf_seq_file_ids[0], 816 .arg2_type = ARG_PTR_TO_MEM, 817 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 818 .arg4_type = ARG_ANYTHING, 819 }; 820 821 static __always_inline int 822 get_map_perf_counter(struct bpf_map *map, u64 flags, 823 u64 *value, u64 *enabled, u64 *running) 824 { 825 struct bpf_array *array = container_of(map, struct bpf_array, map); 826 unsigned int cpu = smp_processor_id(); 827 u64 index = flags & BPF_F_INDEX_MASK; 828 struct bpf_event_entry *ee; 829 830 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 831 return -EINVAL; 832 if (index == BPF_F_CURRENT_CPU) 833 index = cpu; 834 if (unlikely(index >= array->map.max_entries)) 835 return -E2BIG; 836 837 ee = READ_ONCE(array->ptrs[index]); 838 if (!ee) 839 return -ENOENT; 840 841 return perf_event_read_local(ee->event, value, enabled, running); 842 } 843 844 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) 845 { 846 u64 value = 0; 847 int err; 848 849 err = get_map_perf_counter(map, flags, &value, NULL, NULL); 850 /* 851 * this api is ugly since we miss [-22..-2] range of valid 852 * counter values, but that's uapi 853 */ 854 if (err) 855 return err; 856 return value; 857 } 858 859 static const struct bpf_func_proto bpf_perf_event_read_proto = { 860 .func = bpf_perf_event_read, 861 .gpl_only = true, 862 .ret_type = RET_INTEGER, 863 .arg1_type = ARG_CONST_MAP_PTR, 864 .arg2_type = ARG_ANYTHING, 865 }; 866 867 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, 868 struct bpf_perf_event_value *, buf, u32, size) 869 { 870 int err = -EINVAL; 871 872 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 873 goto clear; 874 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, 875 &buf->running); 876 if (unlikely(err)) 877 goto clear; 878 return 0; 879 clear: 880 memset(buf, 0, size); 881 return err; 882 } 883 884 static const struct bpf_func_proto bpf_perf_event_read_value_proto = { 885 .func = bpf_perf_event_read_value, 886 .gpl_only = true, 887 .ret_type = RET_INTEGER, 888 .arg1_type = ARG_CONST_MAP_PTR, 889 .arg2_type = ARG_ANYTHING, 890 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 891 .arg4_type = ARG_CONST_SIZE, 892 }; 893 894 static __always_inline u64 895 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 896 u64 flags, struct perf_sample_data *sd) 897 { 898 struct bpf_array *array = container_of(map, struct bpf_array, map); 899 unsigned int cpu = smp_processor_id(); 900 u64 index = flags & BPF_F_INDEX_MASK; 901 struct bpf_event_entry *ee; 902 struct perf_event *event; 903 904 if (index == BPF_F_CURRENT_CPU) 905 index = cpu; 906 if (unlikely(index >= array->map.max_entries)) 907 return -E2BIG; 908 909 ee = READ_ONCE(array->ptrs[index]); 910 if (!ee) 911 return -ENOENT; 912 913 event = ee->event; 914 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || 915 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 916 return -EINVAL; 917 918 if (unlikely(event->oncpu != cpu)) 919 return -EOPNOTSUPP; 920 921 return perf_event_output(event, sd, regs); 922 } 923 924 /* 925 * Support executing tracepoints in normal, irq, and nmi context that each call 926 * bpf_perf_event_output 927 */ 928 struct bpf_trace_sample_data { 929 struct perf_sample_data sds[3]; 930 }; 931 932 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); 933 static DEFINE_PER_CPU(int, bpf_trace_nest_level); 934 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 935 u64, flags, void *, data, u64, size) 936 { 937 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); 938 int nest_level = this_cpu_inc_return(bpf_trace_nest_level); 939 struct perf_raw_record raw = { 940 .frag = { 941 .size = size, 942 .data = data, 943 }, 944 }; 945 struct perf_sample_data *sd; 946 int err; 947 948 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { 949 err = -EBUSY; 950 goto out; 951 } 952 953 sd = &sds->sds[nest_level - 1]; 954 955 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { 956 err = -EINVAL; 957 goto out; 958 } 959 960 perf_sample_data_init(sd, 0, 0); 961 sd->raw = &raw; 962 963 err = __bpf_perf_event_output(regs, map, flags, sd); 964 965 out: 966 this_cpu_dec(bpf_trace_nest_level); 967 return err; 968 } 969 970 static const struct bpf_func_proto bpf_perf_event_output_proto = { 971 .func = bpf_perf_event_output, 972 .gpl_only = true, 973 .ret_type = RET_INTEGER, 974 .arg1_type = ARG_PTR_TO_CTX, 975 .arg2_type = ARG_CONST_MAP_PTR, 976 .arg3_type = ARG_ANYTHING, 977 .arg4_type = ARG_PTR_TO_MEM, 978 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 979 }; 980 981 static DEFINE_PER_CPU(int, bpf_event_output_nest_level); 982 struct bpf_nested_pt_regs { 983 struct pt_regs regs[3]; 984 }; 985 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); 986 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); 987 988 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 989 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 990 { 991 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); 992 struct perf_raw_frag frag = { 993 .copy = ctx_copy, 994 .size = ctx_size, 995 .data = ctx, 996 }; 997 struct perf_raw_record raw = { 998 .frag = { 999 { 1000 .next = ctx_size ? &frag : NULL, 1001 }, 1002 .size = meta_size, 1003 .data = meta, 1004 }, 1005 }; 1006 struct perf_sample_data *sd; 1007 struct pt_regs *regs; 1008 u64 ret; 1009 1010 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { 1011 ret = -EBUSY; 1012 goto out; 1013 } 1014 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); 1015 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); 1016 1017 perf_fetch_caller_regs(regs); 1018 perf_sample_data_init(sd, 0, 0); 1019 sd->raw = &raw; 1020 1021 ret = __bpf_perf_event_output(regs, map, flags, sd); 1022 out: 1023 this_cpu_dec(bpf_event_output_nest_level); 1024 return ret; 1025 } 1026 1027 BPF_CALL_0(bpf_get_current_task) 1028 { 1029 return (long) current; 1030 } 1031 1032 const struct bpf_func_proto bpf_get_current_task_proto = { 1033 .func = bpf_get_current_task, 1034 .gpl_only = true, 1035 .ret_type = RET_INTEGER, 1036 }; 1037 1038 BPF_CALL_0(bpf_get_current_task_btf) 1039 { 1040 return (unsigned long) current; 1041 } 1042 1043 BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct) 1044 1045 static const struct bpf_func_proto bpf_get_current_task_btf_proto = { 1046 .func = bpf_get_current_task_btf, 1047 .gpl_only = true, 1048 .ret_type = RET_PTR_TO_BTF_ID, 1049 .ret_btf_id = &bpf_get_current_btf_ids[0], 1050 }; 1051 1052 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 1053 { 1054 struct bpf_array *array = container_of(map, struct bpf_array, map); 1055 struct cgroup *cgrp; 1056 1057 if (unlikely(idx >= array->map.max_entries)) 1058 return -E2BIG; 1059 1060 cgrp = READ_ONCE(array->ptrs[idx]); 1061 if (unlikely(!cgrp)) 1062 return -EAGAIN; 1063 1064 return task_under_cgroup_hierarchy(current, cgrp); 1065 } 1066 1067 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 1068 .func = bpf_current_task_under_cgroup, 1069 .gpl_only = false, 1070 .ret_type = RET_INTEGER, 1071 .arg1_type = ARG_CONST_MAP_PTR, 1072 .arg2_type = ARG_ANYTHING, 1073 }; 1074 1075 struct send_signal_irq_work { 1076 struct irq_work irq_work; 1077 struct task_struct *task; 1078 u32 sig; 1079 enum pid_type type; 1080 }; 1081 1082 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); 1083 1084 static void do_bpf_send_signal(struct irq_work *entry) 1085 { 1086 struct send_signal_irq_work *work; 1087 1088 work = container_of(entry, struct send_signal_irq_work, irq_work); 1089 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); 1090 } 1091 1092 static int bpf_send_signal_common(u32 sig, enum pid_type type) 1093 { 1094 struct send_signal_irq_work *work = NULL; 1095 1096 /* Similar to bpf_probe_write_user, task needs to be 1097 * in a sound condition and kernel memory access be 1098 * permitted in order to send signal to the current 1099 * task. 1100 */ 1101 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) 1102 return -EPERM; 1103 if (unlikely(uaccess_kernel())) 1104 return -EPERM; 1105 if (unlikely(!nmi_uaccess_okay())) 1106 return -EPERM; 1107 1108 if (irqs_disabled()) { 1109 /* Do an early check on signal validity. Otherwise, 1110 * the error is lost in deferred irq_work. 1111 */ 1112 if (unlikely(!valid_signal(sig))) 1113 return -EINVAL; 1114 1115 work = this_cpu_ptr(&send_signal_work); 1116 if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) 1117 return -EBUSY; 1118 1119 /* Add the current task, which is the target of sending signal, 1120 * to the irq_work. The current task may change when queued 1121 * irq works get executed. 1122 */ 1123 work->task = current; 1124 work->sig = sig; 1125 work->type = type; 1126 irq_work_queue(&work->irq_work); 1127 return 0; 1128 } 1129 1130 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); 1131 } 1132 1133 BPF_CALL_1(bpf_send_signal, u32, sig) 1134 { 1135 return bpf_send_signal_common(sig, PIDTYPE_TGID); 1136 } 1137 1138 static const struct bpf_func_proto bpf_send_signal_proto = { 1139 .func = bpf_send_signal, 1140 .gpl_only = false, 1141 .ret_type = RET_INTEGER, 1142 .arg1_type = ARG_ANYTHING, 1143 }; 1144 1145 BPF_CALL_1(bpf_send_signal_thread, u32, sig) 1146 { 1147 return bpf_send_signal_common(sig, PIDTYPE_PID); 1148 } 1149 1150 static const struct bpf_func_proto bpf_send_signal_thread_proto = { 1151 .func = bpf_send_signal_thread, 1152 .gpl_only = false, 1153 .ret_type = RET_INTEGER, 1154 .arg1_type = ARG_ANYTHING, 1155 }; 1156 1157 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) 1158 { 1159 long len; 1160 char *p; 1161 1162 if (!sz) 1163 return 0; 1164 1165 p = d_path(path, buf, sz); 1166 if (IS_ERR(p)) { 1167 len = PTR_ERR(p); 1168 } else { 1169 len = buf + sz - p; 1170 memmove(buf, p, len); 1171 } 1172 1173 return len; 1174 } 1175 1176 BTF_SET_START(btf_allowlist_d_path) 1177 #ifdef CONFIG_SECURITY 1178 BTF_ID(func, security_file_permission) 1179 BTF_ID(func, security_inode_getattr) 1180 BTF_ID(func, security_file_open) 1181 #endif 1182 #ifdef CONFIG_SECURITY_PATH 1183 BTF_ID(func, security_path_truncate) 1184 #endif 1185 BTF_ID(func, vfs_truncate) 1186 BTF_ID(func, vfs_fallocate) 1187 BTF_ID(func, dentry_open) 1188 BTF_ID(func, vfs_getattr) 1189 BTF_ID(func, filp_close) 1190 BTF_SET_END(btf_allowlist_d_path) 1191 1192 static bool bpf_d_path_allowed(const struct bpf_prog *prog) 1193 { 1194 if (prog->type == BPF_PROG_TYPE_LSM) 1195 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); 1196 1197 return btf_id_set_contains(&btf_allowlist_d_path, 1198 prog->aux->attach_btf_id); 1199 } 1200 1201 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) 1202 1203 static const struct bpf_func_proto bpf_d_path_proto = { 1204 .func = bpf_d_path, 1205 .gpl_only = false, 1206 .ret_type = RET_INTEGER, 1207 .arg1_type = ARG_PTR_TO_BTF_ID, 1208 .arg1_btf_id = &bpf_d_path_btf_ids[0], 1209 .arg2_type = ARG_PTR_TO_MEM, 1210 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1211 .allowed = bpf_d_path_allowed, 1212 }; 1213 1214 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ 1215 BTF_F_PTR_RAW | BTF_F_ZERO) 1216 1217 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 1218 u64 flags, const struct btf **btf, 1219 s32 *btf_id) 1220 { 1221 const struct btf_type *t; 1222 1223 if (unlikely(flags & ~(BTF_F_ALL))) 1224 return -EINVAL; 1225 1226 if (btf_ptr_size != sizeof(struct btf_ptr)) 1227 return -EINVAL; 1228 1229 *btf = bpf_get_btf_vmlinux(); 1230 1231 if (IS_ERR_OR_NULL(*btf)) 1232 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; 1233 1234 if (ptr->type_id > 0) 1235 *btf_id = ptr->type_id; 1236 else 1237 return -EINVAL; 1238 1239 if (*btf_id > 0) 1240 t = btf_type_by_id(*btf, *btf_id); 1241 if (*btf_id <= 0 || !t) 1242 return -ENOENT; 1243 1244 return 0; 1245 } 1246 1247 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, 1248 u32, btf_ptr_size, u64, flags) 1249 { 1250 const struct btf *btf; 1251 s32 btf_id; 1252 int ret; 1253 1254 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 1255 if (ret) 1256 return ret; 1257 1258 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, 1259 flags); 1260 } 1261 1262 const struct bpf_func_proto bpf_snprintf_btf_proto = { 1263 .func = bpf_snprintf_btf, 1264 .gpl_only = false, 1265 .ret_type = RET_INTEGER, 1266 .arg1_type = ARG_PTR_TO_MEM, 1267 .arg2_type = ARG_CONST_SIZE, 1268 .arg3_type = ARG_PTR_TO_MEM, 1269 .arg4_type = ARG_CONST_SIZE, 1270 .arg5_type = ARG_ANYTHING, 1271 }; 1272 1273 const struct bpf_func_proto * 1274 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1275 { 1276 switch (func_id) { 1277 case BPF_FUNC_map_lookup_elem: 1278 return &bpf_map_lookup_elem_proto; 1279 case BPF_FUNC_map_update_elem: 1280 return &bpf_map_update_elem_proto; 1281 case BPF_FUNC_map_delete_elem: 1282 return &bpf_map_delete_elem_proto; 1283 case BPF_FUNC_map_push_elem: 1284 return &bpf_map_push_elem_proto; 1285 case BPF_FUNC_map_pop_elem: 1286 return &bpf_map_pop_elem_proto; 1287 case BPF_FUNC_map_peek_elem: 1288 return &bpf_map_peek_elem_proto; 1289 case BPF_FUNC_ktime_get_ns: 1290 return &bpf_ktime_get_ns_proto; 1291 case BPF_FUNC_ktime_get_boot_ns: 1292 return &bpf_ktime_get_boot_ns_proto; 1293 case BPF_FUNC_tail_call: 1294 return &bpf_tail_call_proto; 1295 case BPF_FUNC_get_current_pid_tgid: 1296 return &bpf_get_current_pid_tgid_proto; 1297 case BPF_FUNC_get_current_task: 1298 return &bpf_get_current_task_proto; 1299 case BPF_FUNC_get_current_task_btf: 1300 return &bpf_get_current_task_btf_proto; 1301 case BPF_FUNC_get_current_uid_gid: 1302 return &bpf_get_current_uid_gid_proto; 1303 case BPF_FUNC_get_current_comm: 1304 return &bpf_get_current_comm_proto; 1305 case BPF_FUNC_trace_printk: 1306 return bpf_get_trace_printk_proto(); 1307 case BPF_FUNC_get_smp_processor_id: 1308 return &bpf_get_smp_processor_id_proto; 1309 case BPF_FUNC_get_numa_node_id: 1310 return &bpf_get_numa_node_id_proto; 1311 case BPF_FUNC_perf_event_read: 1312 return &bpf_perf_event_read_proto; 1313 case BPF_FUNC_probe_write_user: 1314 return bpf_get_probe_write_proto(); 1315 case BPF_FUNC_current_task_under_cgroup: 1316 return &bpf_current_task_under_cgroup_proto; 1317 case BPF_FUNC_get_prandom_u32: 1318 return &bpf_get_prandom_u32_proto; 1319 case BPF_FUNC_probe_read_user: 1320 return &bpf_probe_read_user_proto; 1321 case BPF_FUNC_probe_read_kernel: 1322 return &bpf_probe_read_kernel_proto; 1323 case BPF_FUNC_probe_read_user_str: 1324 return &bpf_probe_read_user_str_proto; 1325 case BPF_FUNC_probe_read_kernel_str: 1326 return &bpf_probe_read_kernel_str_proto; 1327 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 1328 case BPF_FUNC_probe_read: 1329 return &bpf_probe_read_compat_proto; 1330 case BPF_FUNC_probe_read_str: 1331 return &bpf_probe_read_compat_str_proto; 1332 #endif 1333 #ifdef CONFIG_CGROUPS 1334 case BPF_FUNC_get_current_cgroup_id: 1335 return &bpf_get_current_cgroup_id_proto; 1336 #endif 1337 case BPF_FUNC_send_signal: 1338 return &bpf_send_signal_proto; 1339 case BPF_FUNC_send_signal_thread: 1340 return &bpf_send_signal_thread_proto; 1341 case BPF_FUNC_perf_event_read_value: 1342 return &bpf_perf_event_read_value_proto; 1343 case BPF_FUNC_get_ns_current_pid_tgid: 1344 return &bpf_get_ns_current_pid_tgid_proto; 1345 case BPF_FUNC_ringbuf_output: 1346 return &bpf_ringbuf_output_proto; 1347 case BPF_FUNC_ringbuf_reserve: 1348 return &bpf_ringbuf_reserve_proto; 1349 case BPF_FUNC_ringbuf_submit: 1350 return &bpf_ringbuf_submit_proto; 1351 case BPF_FUNC_ringbuf_discard: 1352 return &bpf_ringbuf_discard_proto; 1353 case BPF_FUNC_ringbuf_query: 1354 return &bpf_ringbuf_query_proto; 1355 case BPF_FUNC_jiffies64: 1356 return &bpf_jiffies64_proto; 1357 case BPF_FUNC_get_task_stack: 1358 return &bpf_get_task_stack_proto; 1359 case BPF_FUNC_copy_from_user: 1360 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; 1361 case BPF_FUNC_snprintf_btf: 1362 return &bpf_snprintf_btf_proto; 1363 case BPF_FUNC_bpf_per_cpu_ptr: 1364 return &bpf_per_cpu_ptr_proto; 1365 case BPF_FUNC_bpf_this_cpu_ptr: 1366 return &bpf_this_cpu_ptr_proto; 1367 default: 1368 return NULL; 1369 } 1370 } 1371 1372 static const struct bpf_func_proto * 1373 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1374 { 1375 switch (func_id) { 1376 case BPF_FUNC_perf_event_output: 1377 return &bpf_perf_event_output_proto; 1378 case BPF_FUNC_get_stackid: 1379 return &bpf_get_stackid_proto; 1380 case BPF_FUNC_get_stack: 1381 return &bpf_get_stack_proto; 1382 #ifdef CONFIG_BPF_KPROBE_OVERRIDE 1383 case BPF_FUNC_override_return: 1384 return &bpf_override_return_proto; 1385 #endif 1386 default: 1387 return bpf_tracing_func_proto(func_id, prog); 1388 } 1389 } 1390 1391 /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 1392 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1393 const struct bpf_prog *prog, 1394 struct bpf_insn_access_aux *info) 1395 { 1396 if (off < 0 || off >= sizeof(struct pt_regs)) 1397 return false; 1398 if (type != BPF_READ) 1399 return false; 1400 if (off % size != 0) 1401 return false; 1402 /* 1403 * Assertion for 32 bit to make sure last 8 byte access 1404 * (BPF_DW) to the last 4 byte member is disallowed. 1405 */ 1406 if (off + size > sizeof(struct pt_regs)) 1407 return false; 1408 1409 return true; 1410 } 1411 1412 const struct bpf_verifier_ops kprobe_verifier_ops = { 1413 .get_func_proto = kprobe_prog_func_proto, 1414 .is_valid_access = kprobe_prog_is_valid_access, 1415 }; 1416 1417 const struct bpf_prog_ops kprobe_prog_ops = { 1418 }; 1419 1420 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1421 u64, flags, void *, data, u64, size) 1422 { 1423 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1424 1425 /* 1426 * r1 points to perf tracepoint buffer where first 8 bytes are hidden 1427 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1428 * from there and call the same bpf_perf_event_output() helper inline. 1429 */ 1430 return ____bpf_perf_event_output(regs, map, flags, data, size); 1431 } 1432 1433 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 1434 .func = bpf_perf_event_output_tp, 1435 .gpl_only = true, 1436 .ret_type = RET_INTEGER, 1437 .arg1_type = ARG_PTR_TO_CTX, 1438 .arg2_type = ARG_CONST_MAP_PTR, 1439 .arg3_type = ARG_ANYTHING, 1440 .arg4_type = ARG_PTR_TO_MEM, 1441 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1442 }; 1443 1444 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1445 u64, flags) 1446 { 1447 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1448 1449 /* 1450 * Same comment as in bpf_perf_event_output_tp(), only that this time 1451 * the other helper's function body cannot be inlined due to being 1452 * external, thus we need to call raw helper function. 1453 */ 1454 return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1455 flags, 0, 0); 1456 } 1457 1458 static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 1459 .func = bpf_get_stackid_tp, 1460 .gpl_only = true, 1461 .ret_type = RET_INTEGER, 1462 .arg1_type = ARG_PTR_TO_CTX, 1463 .arg2_type = ARG_CONST_MAP_PTR, 1464 .arg3_type = ARG_ANYTHING, 1465 }; 1466 1467 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1468 u64, flags) 1469 { 1470 struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1471 1472 return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1473 (unsigned long) size, flags, 0); 1474 } 1475 1476 static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1477 .func = bpf_get_stack_tp, 1478 .gpl_only = true, 1479 .ret_type = RET_INTEGER, 1480 .arg1_type = ARG_PTR_TO_CTX, 1481 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1482 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1483 .arg4_type = ARG_ANYTHING, 1484 }; 1485 1486 static const struct bpf_func_proto * 1487 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1488 { 1489 switch (func_id) { 1490 case BPF_FUNC_perf_event_output: 1491 return &bpf_perf_event_output_proto_tp; 1492 case BPF_FUNC_get_stackid: 1493 return &bpf_get_stackid_proto_tp; 1494 case BPF_FUNC_get_stack: 1495 return &bpf_get_stack_proto_tp; 1496 default: 1497 return bpf_tracing_func_proto(func_id, prog); 1498 } 1499 } 1500 1501 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1502 const struct bpf_prog *prog, 1503 struct bpf_insn_access_aux *info) 1504 { 1505 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 1506 return false; 1507 if (type != BPF_READ) 1508 return false; 1509 if (off % size != 0) 1510 return false; 1511 1512 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 1513 return true; 1514 } 1515 1516 const struct bpf_verifier_ops tracepoint_verifier_ops = { 1517 .get_func_proto = tp_prog_func_proto, 1518 .is_valid_access = tp_prog_is_valid_access, 1519 }; 1520 1521 const struct bpf_prog_ops tracepoint_prog_ops = { 1522 }; 1523 1524 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1525 struct bpf_perf_event_value *, buf, u32, size) 1526 { 1527 int err = -EINVAL; 1528 1529 if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1530 goto clear; 1531 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1532 &buf->running); 1533 if (unlikely(err)) 1534 goto clear; 1535 return 0; 1536 clear: 1537 memset(buf, 0, size); 1538 return err; 1539 } 1540 1541 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1542 .func = bpf_perf_prog_read_value, 1543 .gpl_only = true, 1544 .ret_type = RET_INTEGER, 1545 .arg1_type = ARG_PTR_TO_CTX, 1546 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1547 .arg3_type = ARG_CONST_SIZE, 1548 }; 1549 1550 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1551 void *, buf, u32, size, u64, flags) 1552 { 1553 #ifndef CONFIG_X86 1554 return -ENOENT; 1555 #else 1556 static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1557 struct perf_branch_stack *br_stack = ctx->data->br_stack; 1558 u32 to_copy; 1559 1560 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1561 return -EINVAL; 1562 1563 if (unlikely(!br_stack)) 1564 return -EINVAL; 1565 1566 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1567 return br_stack->nr * br_entry_size; 1568 1569 if (!buf || (size % br_entry_size != 0)) 1570 return -EINVAL; 1571 1572 to_copy = min_t(u32, br_stack->nr * br_entry_size, size); 1573 memcpy(buf, br_stack->entries, to_copy); 1574 1575 return to_copy; 1576 #endif 1577 } 1578 1579 static const struct bpf_func_proto bpf_read_branch_records_proto = { 1580 .func = bpf_read_branch_records, 1581 .gpl_only = true, 1582 .ret_type = RET_INTEGER, 1583 .arg1_type = ARG_PTR_TO_CTX, 1584 .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 1585 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1586 .arg4_type = ARG_ANYTHING, 1587 }; 1588 1589 static const struct bpf_func_proto * 1590 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1591 { 1592 switch (func_id) { 1593 case BPF_FUNC_perf_event_output: 1594 return &bpf_perf_event_output_proto_tp; 1595 case BPF_FUNC_get_stackid: 1596 return &bpf_get_stackid_proto_pe; 1597 case BPF_FUNC_get_stack: 1598 return &bpf_get_stack_proto_pe; 1599 case BPF_FUNC_perf_prog_read_value: 1600 return &bpf_perf_prog_read_value_proto; 1601 case BPF_FUNC_read_branch_records: 1602 return &bpf_read_branch_records_proto; 1603 default: 1604 return bpf_tracing_func_proto(func_id, prog); 1605 } 1606 } 1607 1608 /* 1609 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1610 * to avoid potential recursive reuse issue when/if tracepoints are added 1611 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 1612 * 1613 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 1614 * in normal, irq, and nmi context. 1615 */ 1616 struct bpf_raw_tp_regs { 1617 struct pt_regs regs[3]; 1618 }; 1619 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 1620 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 1621 static struct pt_regs *get_bpf_raw_tp_regs(void) 1622 { 1623 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 1624 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 1625 1626 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { 1627 this_cpu_dec(bpf_raw_tp_nest_level); 1628 return ERR_PTR(-EBUSY); 1629 } 1630 1631 return &tp_regs->regs[nest_level - 1]; 1632 } 1633 1634 static void put_bpf_raw_tp_regs(void) 1635 { 1636 this_cpu_dec(bpf_raw_tp_nest_level); 1637 } 1638 1639 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1640 struct bpf_map *, map, u64, flags, void *, data, u64, size) 1641 { 1642 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1643 int ret; 1644 1645 if (IS_ERR(regs)) 1646 return PTR_ERR(regs); 1647 1648 perf_fetch_caller_regs(regs); 1649 ret = ____bpf_perf_event_output(regs, map, flags, data, size); 1650 1651 put_bpf_raw_tp_regs(); 1652 return ret; 1653 } 1654 1655 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1656 .func = bpf_perf_event_output_raw_tp, 1657 .gpl_only = true, 1658 .ret_type = RET_INTEGER, 1659 .arg1_type = ARG_PTR_TO_CTX, 1660 .arg2_type = ARG_CONST_MAP_PTR, 1661 .arg3_type = ARG_ANYTHING, 1662 .arg4_type = ARG_PTR_TO_MEM, 1663 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1664 }; 1665 1666 extern const struct bpf_func_proto bpf_skb_output_proto; 1667 extern const struct bpf_func_proto bpf_xdp_output_proto; 1668 1669 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1670 struct bpf_map *, map, u64, flags) 1671 { 1672 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1673 int ret; 1674 1675 if (IS_ERR(regs)) 1676 return PTR_ERR(regs); 1677 1678 perf_fetch_caller_regs(regs); 1679 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 1680 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1681 flags, 0, 0); 1682 put_bpf_raw_tp_regs(); 1683 return ret; 1684 } 1685 1686 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1687 .func = bpf_get_stackid_raw_tp, 1688 .gpl_only = true, 1689 .ret_type = RET_INTEGER, 1690 .arg1_type = ARG_PTR_TO_CTX, 1691 .arg2_type = ARG_CONST_MAP_PTR, 1692 .arg3_type = ARG_ANYTHING, 1693 }; 1694 1695 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1696 void *, buf, u32, size, u64, flags) 1697 { 1698 struct pt_regs *regs = get_bpf_raw_tp_regs(); 1699 int ret; 1700 1701 if (IS_ERR(regs)) 1702 return PTR_ERR(regs); 1703 1704 perf_fetch_caller_regs(regs); 1705 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1706 (unsigned long) size, flags, 0); 1707 put_bpf_raw_tp_regs(); 1708 return ret; 1709 } 1710 1711 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1712 .func = bpf_get_stack_raw_tp, 1713 .gpl_only = true, 1714 .ret_type = RET_INTEGER, 1715 .arg1_type = ARG_PTR_TO_CTX, 1716 .arg2_type = ARG_PTR_TO_MEM, 1717 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1718 .arg4_type = ARG_ANYTHING, 1719 }; 1720 1721 static const struct bpf_func_proto * 1722 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1723 { 1724 switch (func_id) { 1725 case BPF_FUNC_perf_event_output: 1726 return &bpf_perf_event_output_proto_raw_tp; 1727 case BPF_FUNC_get_stackid: 1728 return &bpf_get_stackid_proto_raw_tp; 1729 case BPF_FUNC_get_stack: 1730 return &bpf_get_stack_proto_raw_tp; 1731 default: 1732 return bpf_tracing_func_proto(func_id, prog); 1733 } 1734 } 1735 1736 const struct bpf_func_proto * 1737 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1738 { 1739 switch (func_id) { 1740 #ifdef CONFIG_NET 1741 case BPF_FUNC_skb_output: 1742 return &bpf_skb_output_proto; 1743 case BPF_FUNC_xdp_output: 1744 return &bpf_xdp_output_proto; 1745 case BPF_FUNC_skc_to_tcp6_sock: 1746 return &bpf_skc_to_tcp6_sock_proto; 1747 case BPF_FUNC_skc_to_tcp_sock: 1748 return &bpf_skc_to_tcp_sock_proto; 1749 case BPF_FUNC_skc_to_tcp_timewait_sock: 1750 return &bpf_skc_to_tcp_timewait_sock_proto; 1751 case BPF_FUNC_skc_to_tcp_request_sock: 1752 return &bpf_skc_to_tcp_request_sock_proto; 1753 case BPF_FUNC_skc_to_udp6_sock: 1754 return &bpf_skc_to_udp6_sock_proto; 1755 case BPF_FUNC_sk_storage_get: 1756 return &bpf_sk_storage_get_tracing_proto; 1757 case BPF_FUNC_sk_storage_delete: 1758 return &bpf_sk_storage_delete_tracing_proto; 1759 #endif 1760 case BPF_FUNC_seq_printf: 1761 return prog->expected_attach_type == BPF_TRACE_ITER ? 1762 &bpf_seq_printf_proto : 1763 NULL; 1764 case BPF_FUNC_seq_write: 1765 return prog->expected_attach_type == BPF_TRACE_ITER ? 1766 &bpf_seq_write_proto : 1767 NULL; 1768 case BPF_FUNC_seq_printf_btf: 1769 return prog->expected_attach_type == BPF_TRACE_ITER ? 1770 &bpf_seq_printf_btf_proto : 1771 NULL; 1772 case BPF_FUNC_d_path: 1773 return &bpf_d_path_proto; 1774 default: 1775 return raw_tp_prog_func_proto(func_id, prog); 1776 } 1777 } 1778 1779 static bool raw_tp_prog_is_valid_access(int off, int size, 1780 enum bpf_access_type type, 1781 const struct bpf_prog *prog, 1782 struct bpf_insn_access_aux *info) 1783 { 1784 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1785 return false; 1786 if (type != BPF_READ) 1787 return false; 1788 if (off % size != 0) 1789 return false; 1790 return true; 1791 } 1792 1793 static bool tracing_prog_is_valid_access(int off, int size, 1794 enum bpf_access_type type, 1795 const struct bpf_prog *prog, 1796 struct bpf_insn_access_aux *info) 1797 { 1798 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 1799 return false; 1800 if (type != BPF_READ) 1801 return false; 1802 if (off % size != 0) 1803 return false; 1804 return btf_ctx_access(off, size, type, prog, info); 1805 } 1806 1807 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 1808 const union bpf_attr *kattr, 1809 union bpf_attr __user *uattr) 1810 { 1811 return -ENOTSUPP; 1812 } 1813 1814 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 1815 .get_func_proto = raw_tp_prog_func_proto, 1816 .is_valid_access = raw_tp_prog_is_valid_access, 1817 }; 1818 1819 const struct bpf_prog_ops raw_tracepoint_prog_ops = { 1820 #ifdef CONFIG_NET 1821 .test_run = bpf_prog_test_run_raw_tp, 1822 #endif 1823 }; 1824 1825 const struct bpf_verifier_ops tracing_verifier_ops = { 1826 .get_func_proto = tracing_prog_func_proto, 1827 .is_valid_access = tracing_prog_is_valid_access, 1828 }; 1829 1830 const struct bpf_prog_ops tracing_prog_ops = { 1831 .test_run = bpf_prog_test_run_tracing, 1832 }; 1833 1834 static bool raw_tp_writable_prog_is_valid_access(int off, int size, 1835 enum bpf_access_type type, 1836 const struct bpf_prog *prog, 1837 struct bpf_insn_access_aux *info) 1838 { 1839 if (off == 0) { 1840 if (size != sizeof(u64) || type != BPF_READ) 1841 return false; 1842 info->reg_type = PTR_TO_TP_BUFFER; 1843 } 1844 return raw_tp_prog_is_valid_access(off, size, type, prog, info); 1845 } 1846 1847 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 1848 .get_func_proto = raw_tp_prog_func_proto, 1849 .is_valid_access = raw_tp_writable_prog_is_valid_access, 1850 }; 1851 1852 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 1853 }; 1854 1855 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 1856 const struct bpf_prog *prog, 1857 struct bpf_insn_access_aux *info) 1858 { 1859 const int size_u64 = sizeof(u64); 1860 1861 if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 1862 return false; 1863 if (type != BPF_READ) 1864 return false; 1865 if (off % size != 0) { 1866 if (sizeof(unsigned long) != 4) 1867 return false; 1868 if (size != 8) 1869 return false; 1870 if (off % size != 4) 1871 return false; 1872 } 1873 1874 switch (off) { 1875 case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 1876 bpf_ctx_record_field_size(info, size_u64); 1877 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1878 return false; 1879 break; 1880 case bpf_ctx_range(struct bpf_perf_event_data, addr): 1881 bpf_ctx_record_field_size(info, size_u64); 1882 if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 1883 return false; 1884 break; 1885 default: 1886 if (size != sizeof(long)) 1887 return false; 1888 } 1889 1890 return true; 1891 } 1892 1893 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 1894 const struct bpf_insn *si, 1895 struct bpf_insn *insn_buf, 1896 struct bpf_prog *prog, u32 *target_size) 1897 { 1898 struct bpf_insn *insn = insn_buf; 1899 1900 switch (si->off) { 1901 case offsetof(struct bpf_perf_event_data, sample_period): 1902 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1903 data), si->dst_reg, si->src_reg, 1904 offsetof(struct bpf_perf_event_data_kern, data)); 1905 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1906 bpf_target_off(struct perf_sample_data, period, 8, 1907 target_size)); 1908 break; 1909 case offsetof(struct bpf_perf_event_data, addr): 1910 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1911 data), si->dst_reg, si->src_reg, 1912 offsetof(struct bpf_perf_event_data_kern, data)); 1913 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 1914 bpf_target_off(struct perf_sample_data, addr, 8, 1915 target_size)); 1916 break; 1917 default: 1918 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 1919 regs), si->dst_reg, si->src_reg, 1920 offsetof(struct bpf_perf_event_data_kern, regs)); 1921 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 1922 si->off); 1923 break; 1924 } 1925 1926 return insn - insn_buf; 1927 } 1928 1929 const struct bpf_verifier_ops perf_event_verifier_ops = { 1930 .get_func_proto = pe_prog_func_proto, 1931 .is_valid_access = pe_prog_is_valid_access, 1932 .convert_ctx_access = pe_prog_convert_ctx_access, 1933 }; 1934 1935 const struct bpf_prog_ops perf_event_prog_ops = { 1936 }; 1937 1938 static DEFINE_MUTEX(bpf_event_mutex); 1939 1940 #define BPF_TRACE_MAX_PROGS 64 1941 1942 int perf_event_attach_bpf_prog(struct perf_event *event, 1943 struct bpf_prog *prog) 1944 { 1945 struct bpf_prog_array *old_array; 1946 struct bpf_prog_array *new_array; 1947 int ret = -EEXIST; 1948 1949 /* 1950 * Kprobe override only works if they are on the function entry, 1951 * and only if they are on the opt-in list. 1952 */ 1953 if (prog->kprobe_override && 1954 (!trace_kprobe_on_func_entry(event->tp_event) || 1955 !trace_kprobe_error_injectable(event->tp_event))) 1956 return -EINVAL; 1957 1958 mutex_lock(&bpf_event_mutex); 1959 1960 if (event->prog) 1961 goto unlock; 1962 1963 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1964 if (old_array && 1965 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 1966 ret = -E2BIG; 1967 goto unlock; 1968 } 1969 1970 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); 1971 if (ret < 0) 1972 goto unlock; 1973 1974 /* set the new array to event->tp_event and set event->prog */ 1975 event->prog = prog; 1976 rcu_assign_pointer(event->tp_event->prog_array, new_array); 1977 bpf_prog_array_free(old_array); 1978 1979 unlock: 1980 mutex_unlock(&bpf_event_mutex); 1981 return ret; 1982 } 1983 1984 void perf_event_detach_bpf_prog(struct perf_event *event) 1985 { 1986 struct bpf_prog_array *old_array; 1987 struct bpf_prog_array *new_array; 1988 int ret; 1989 1990 mutex_lock(&bpf_event_mutex); 1991 1992 if (!event->prog) 1993 goto unlock; 1994 1995 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 1996 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); 1997 if (ret == -ENOENT) 1998 goto unlock; 1999 if (ret < 0) { 2000 bpf_prog_array_delete_safe(old_array, event->prog); 2001 } else { 2002 rcu_assign_pointer(event->tp_event->prog_array, new_array); 2003 bpf_prog_array_free(old_array); 2004 } 2005 2006 bpf_prog_put(event->prog); 2007 event->prog = NULL; 2008 2009 unlock: 2010 mutex_unlock(&bpf_event_mutex); 2011 } 2012 2013 int perf_event_query_prog_array(struct perf_event *event, void __user *info) 2014 { 2015 struct perf_event_query_bpf __user *uquery = info; 2016 struct perf_event_query_bpf query = {}; 2017 struct bpf_prog_array *progs; 2018 u32 *ids, prog_cnt, ids_len; 2019 int ret; 2020 2021 if (!perfmon_capable()) 2022 return -EPERM; 2023 if (event->attr.type != PERF_TYPE_TRACEPOINT) 2024 return -EINVAL; 2025 if (copy_from_user(&query, uquery, sizeof(query))) 2026 return -EFAULT; 2027 2028 ids_len = query.ids_len; 2029 if (ids_len > BPF_TRACE_MAX_PROGS) 2030 return -E2BIG; 2031 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 2032 if (!ids) 2033 return -ENOMEM; 2034 /* 2035 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 2036 * is required when user only wants to check for uquery->prog_cnt. 2037 * There is no need to check for it since the case is handled 2038 * gracefully in bpf_prog_array_copy_info. 2039 */ 2040 2041 mutex_lock(&bpf_event_mutex); 2042 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 2043 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 2044 mutex_unlock(&bpf_event_mutex); 2045 2046 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 2047 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 2048 ret = -EFAULT; 2049 2050 kfree(ids); 2051 return ret; 2052 } 2053 2054 extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 2055 extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 2056 2057 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 2058 { 2059 struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 2060 2061 for (; btp < __stop__bpf_raw_tp; btp++) { 2062 if (!strcmp(btp->tp->name, name)) 2063 return btp; 2064 } 2065 2066 return bpf_get_raw_tracepoint_module(name); 2067 } 2068 2069 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 2070 { 2071 struct module *mod = __module_address((unsigned long)btp); 2072 2073 if (mod) 2074 module_put(mod); 2075 } 2076 2077 static __always_inline 2078 void __bpf_trace_run(struct bpf_prog *prog, u64 *args) 2079 { 2080 cant_sleep(); 2081 rcu_read_lock(); 2082 (void) BPF_PROG_RUN(prog, args); 2083 rcu_read_unlock(); 2084 } 2085 2086 #define UNPACK(...) __VA_ARGS__ 2087 #define REPEAT_1(FN, DL, X, ...) FN(X) 2088 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 2089 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 2090 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 2091 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 2092 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 2093 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 2094 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 2095 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 2096 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 2097 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 2098 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 2099 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) 2100 2101 #define SARG(X) u64 arg##X 2102 #define COPY(X) args[X] = arg##X 2103 2104 #define __DL_COM (,) 2105 #define __DL_SEM (;) 2106 2107 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 2108 2109 #define BPF_TRACE_DEFN_x(x) \ 2110 void bpf_trace_run##x(struct bpf_prog *prog, \ 2111 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 2112 { \ 2113 u64 args[x]; \ 2114 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 2115 __bpf_trace_run(prog, args); \ 2116 } \ 2117 EXPORT_SYMBOL_GPL(bpf_trace_run##x) 2118 BPF_TRACE_DEFN_x(1); 2119 BPF_TRACE_DEFN_x(2); 2120 BPF_TRACE_DEFN_x(3); 2121 BPF_TRACE_DEFN_x(4); 2122 BPF_TRACE_DEFN_x(5); 2123 BPF_TRACE_DEFN_x(6); 2124 BPF_TRACE_DEFN_x(7); 2125 BPF_TRACE_DEFN_x(8); 2126 BPF_TRACE_DEFN_x(9); 2127 BPF_TRACE_DEFN_x(10); 2128 BPF_TRACE_DEFN_x(11); 2129 BPF_TRACE_DEFN_x(12); 2130 2131 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2132 { 2133 struct tracepoint *tp = btp->tp; 2134 2135 /* 2136 * check that program doesn't access arguments beyond what's 2137 * available in this tracepoint 2138 */ 2139 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 2140 return -EINVAL; 2141 2142 if (prog->aux->max_tp_access > btp->writable_size) 2143 return -EINVAL; 2144 2145 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); 2146 } 2147 2148 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2149 { 2150 return __bpf_probe_register(btp, prog); 2151 } 2152 2153 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2154 { 2155 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); 2156 } 2157 2158 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 2159 u32 *fd_type, const char **buf, 2160 u64 *probe_offset, u64 *probe_addr) 2161 { 2162 bool is_tracepoint, is_syscall_tp; 2163 struct bpf_prog *prog; 2164 int flags, err = 0; 2165 2166 prog = event->prog; 2167 if (!prog) 2168 return -ENOENT; 2169 2170 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 2171 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 2172 return -EOPNOTSUPP; 2173 2174 *prog_id = prog->aux->id; 2175 flags = event->tp_event->flags; 2176 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 2177 is_syscall_tp = is_syscall_trace_event(event->tp_event); 2178 2179 if (is_tracepoint || is_syscall_tp) { 2180 *buf = is_tracepoint ? event->tp_event->tp->name 2181 : event->tp_event->name; 2182 *fd_type = BPF_FD_TYPE_TRACEPOINT; 2183 *probe_offset = 0x0; 2184 *probe_addr = 0x0; 2185 } else { 2186 /* kprobe/uprobe */ 2187 err = -EOPNOTSUPP; 2188 #ifdef CONFIG_KPROBE_EVENTS 2189 if (flags & TRACE_EVENT_FL_KPROBE) 2190 err = bpf_get_kprobe_info(event, fd_type, buf, 2191 probe_offset, probe_addr, 2192 event->attr.type == PERF_TYPE_TRACEPOINT); 2193 #endif 2194 #ifdef CONFIG_UPROBE_EVENTS 2195 if (flags & TRACE_EVENT_FL_UPROBE) 2196 err = bpf_get_uprobe_info(event, fd_type, buf, 2197 probe_offset, 2198 event->attr.type == PERF_TYPE_TRACEPOINT); 2199 #endif 2200 } 2201 2202 return err; 2203 } 2204 2205 static int __init send_signal_irq_work_init(void) 2206 { 2207 int cpu; 2208 struct send_signal_irq_work *work; 2209 2210 for_each_possible_cpu(cpu) { 2211 work = per_cpu_ptr(&send_signal_work, cpu); 2212 init_irq_work(&work->irq_work, do_bpf_send_signal); 2213 } 2214 return 0; 2215 } 2216 2217 subsys_initcall(send_signal_irq_work_init); 2218 2219 #ifdef CONFIG_MODULES 2220 static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2221 void *module) 2222 { 2223 struct bpf_trace_module *btm, *tmp; 2224 struct module *mod = module; 2225 int ret = 0; 2226 2227 if (mod->num_bpf_raw_events == 0 || 2228 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 2229 goto out; 2230 2231 mutex_lock(&bpf_module_mutex); 2232 2233 switch (op) { 2234 case MODULE_STATE_COMING: 2235 btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2236 if (btm) { 2237 btm->module = module; 2238 list_add(&btm->list, &bpf_trace_modules); 2239 } else { 2240 ret = -ENOMEM; 2241 } 2242 break; 2243 case MODULE_STATE_GOING: 2244 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2245 if (btm->module == module) { 2246 list_del(&btm->list); 2247 kfree(btm); 2248 break; 2249 } 2250 } 2251 break; 2252 } 2253 2254 mutex_unlock(&bpf_module_mutex); 2255 2256 out: 2257 return notifier_from_errno(ret); 2258 } 2259 2260 static struct notifier_block bpf_module_nb = { 2261 .notifier_call = bpf_event_notify, 2262 }; 2263 2264 static int __init bpf_event_init(void) 2265 { 2266 register_module_notifier(&bpf_module_nb); 2267 return 0; 2268 } 2269 2270 fs_initcall(bpf_event_init); 2271 #endif /* CONFIG_MODULES */ 2272