1 /* 2 * uprobes-based tracing events 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public License 14 * along with this program; if not, write to the Free Software 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * 17 * Copyright (C) IBM Corporation, 2010-2012 18 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> 19 */ 20 #define pr_fmt(fmt) "trace_kprobe: " fmt 21 22 #include <linux/module.h> 23 #include <linux/uaccess.h> 24 #include <linux/uprobes.h> 25 #include <linux/namei.h> 26 #include <linux/string.h> 27 #include <linux/rculist.h> 28 29 #include "trace_probe.h" 30 31 #define UPROBE_EVENT_SYSTEM "uprobes" 32 33 struct uprobe_trace_entry_head { 34 struct trace_entry ent; 35 unsigned long vaddr[]; 36 }; 37 38 #define SIZEOF_TRACE_ENTRY(is_return) \ 39 (sizeof(struct uprobe_trace_entry_head) + \ 40 sizeof(unsigned long) * (is_return ? 2 : 1)) 41 42 #define DATAOF_TRACE_ENTRY(entry, is_return) \ 43 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return)) 44 45 struct trace_uprobe_filter { 46 rwlock_t rwlock; 47 int nr_systemwide; 48 struct list_head perf_events; 49 }; 50 51 /* 52 * uprobe event core functions 53 */ 54 struct trace_uprobe { 55 struct list_head list; 56 struct trace_uprobe_filter filter; 57 struct uprobe_consumer consumer; 58 struct path path; 59 struct inode *inode; 60 char *filename; 61 unsigned long offset; 62 unsigned long nhit; 63 struct trace_probe tp; 64 }; 65 66 #define SIZEOF_TRACE_UPROBE(n) \ 67 (offsetof(struct trace_uprobe, tp.args) + \ 68 (sizeof(struct probe_arg) * (n))) 69 70 static int register_uprobe_event(struct trace_uprobe *tu); 71 static int unregister_uprobe_event(struct trace_uprobe *tu); 72 73 static DEFINE_MUTEX(uprobe_lock); 74 static LIST_HEAD(uprobe_list); 75 76 struct uprobe_dispatch_data { 77 struct trace_uprobe *tu; 78 unsigned long bp_addr; 79 }; 80 81 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); 82 static int uretprobe_dispatcher(struct uprobe_consumer *con, 83 unsigned long func, struct pt_regs *regs); 84 85 #ifdef CONFIG_STACK_GROWSUP 86 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) 87 { 88 return addr - (n * sizeof(long)); 89 } 90 #else 91 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) 92 { 93 return addr + (n * sizeof(long)); 94 } 95 #endif 96 97 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) 98 { 99 unsigned long ret; 100 unsigned long addr = user_stack_pointer(regs); 101 102 addr = adjust_stack_addr(addr, n); 103 104 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret))) 105 return 0; 106 107 return ret; 108 } 109 110 /* 111 * Uprobes-specific fetch functions 112 */ 113 #define DEFINE_FETCH_stack(type) \ 114 static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \ 115 void *offset, void *dest) \ 116 { \ 117 *(type *)dest = (type)get_user_stack_nth(regs, \ 118 ((unsigned long)offset)); \ 119 } 120 DEFINE_BASIC_FETCH_FUNCS(stack) 121 /* No string on the stack entry */ 122 #define fetch_stack_string NULL 123 #define fetch_stack_string_size NULL 124 125 #define DEFINE_FETCH_memory(type) \ 126 static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \ 127 void *addr, void *dest) \ 128 { \ 129 type retval; \ 130 void __user *vaddr = (void __force __user *) addr; \ 131 \ 132 if (copy_from_user(&retval, vaddr, sizeof(type))) \ 133 *(type *)dest = 0; \ 134 else \ 135 *(type *) dest = retval; \ 136 } 137 DEFINE_BASIC_FETCH_FUNCS(memory) 138 /* 139 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max 140 * length and relative data location. 141 */ 142 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, 143 void *addr, void *dest) 144 { 145 long ret; 146 u32 rloc = *(u32 *)dest; 147 int maxlen = get_rloc_len(rloc); 148 u8 *dst = get_rloc_data(dest); 149 void __user *src = (void __force __user *) addr; 150 151 if (!maxlen) 152 return; 153 154 ret = strncpy_from_user(dst, src, maxlen); 155 if (ret == maxlen) 156 dst[--ret] = '\0'; 157 158 if (ret < 0) { /* Failed to fetch string */ 159 ((u8 *)get_rloc_data(dest))[0] = '\0'; 160 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc)); 161 } else { 162 *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc)); 163 } 164 } 165 166 static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, 167 void *addr, void *dest) 168 { 169 int len; 170 void __user *vaddr = (void __force __user *) addr; 171 172 len = strnlen_user(vaddr, MAX_STRING_SIZE); 173 174 if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */ 175 *(u32 *)dest = 0; 176 else 177 *(u32 *)dest = len; 178 } 179 180 static unsigned long translate_user_vaddr(void *file_offset) 181 { 182 unsigned long base_addr; 183 struct uprobe_dispatch_data *udd; 184 185 udd = (void *) current->utask->vaddr; 186 187 base_addr = udd->bp_addr - udd->tu->offset; 188 return base_addr + (unsigned long)file_offset; 189 } 190 191 #define DEFINE_FETCH_file_offset(type) \ 192 static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \ 193 void *offset, void *dest)\ 194 { \ 195 void *vaddr = (void *)translate_user_vaddr(offset); \ 196 \ 197 FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \ 198 } 199 DEFINE_BASIC_FETCH_FUNCS(file_offset) 200 DEFINE_FETCH_file_offset(string) 201 DEFINE_FETCH_file_offset(string_size) 202 203 /* Fetch type information table */ 204 static const struct fetch_type uprobes_fetch_type_table[] = { 205 /* Special types */ 206 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, 207 sizeof(u32), 1, "__data_loc char[]"), 208 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, 209 string_size, sizeof(u32), 0, "u32"), 210 /* Basic types */ 211 ASSIGN_FETCH_TYPE(u8, u8, 0), 212 ASSIGN_FETCH_TYPE(u16, u16, 0), 213 ASSIGN_FETCH_TYPE(u32, u32, 0), 214 ASSIGN_FETCH_TYPE(u64, u64, 0), 215 ASSIGN_FETCH_TYPE(s8, u8, 1), 216 ASSIGN_FETCH_TYPE(s16, u16, 1), 217 ASSIGN_FETCH_TYPE(s32, u32, 1), 218 ASSIGN_FETCH_TYPE(s64, u64, 1), 219 ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0), 220 ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0), 221 ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0), 222 ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0), 223 224 ASSIGN_FETCH_TYPE_END 225 }; 226 227 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) 228 { 229 rwlock_init(&filter->rwlock); 230 filter->nr_systemwide = 0; 231 INIT_LIST_HEAD(&filter->perf_events); 232 } 233 234 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter) 235 { 236 return !filter->nr_systemwide && list_empty(&filter->perf_events); 237 } 238 239 static inline bool is_ret_probe(struct trace_uprobe *tu) 240 { 241 return tu->consumer.ret_handler != NULL; 242 } 243 244 /* 245 * Allocate new trace_uprobe and initialize it (including uprobes). 246 */ 247 static struct trace_uprobe * 248 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) 249 { 250 struct trace_uprobe *tu; 251 252 if (!event || !is_good_name(event)) 253 return ERR_PTR(-EINVAL); 254 255 if (!group || !is_good_name(group)) 256 return ERR_PTR(-EINVAL); 257 258 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL); 259 if (!tu) 260 return ERR_PTR(-ENOMEM); 261 262 tu->tp.call.class = &tu->tp.class; 263 tu->tp.call.name = kstrdup(event, GFP_KERNEL); 264 if (!tu->tp.call.name) 265 goto error; 266 267 tu->tp.class.system = kstrdup(group, GFP_KERNEL); 268 if (!tu->tp.class.system) 269 goto error; 270 271 INIT_LIST_HEAD(&tu->list); 272 INIT_LIST_HEAD(&tu->tp.files); 273 tu->consumer.handler = uprobe_dispatcher; 274 if (is_ret) 275 tu->consumer.ret_handler = uretprobe_dispatcher; 276 init_trace_uprobe_filter(&tu->filter); 277 return tu; 278 279 error: 280 kfree(tu->tp.call.name); 281 kfree(tu); 282 283 return ERR_PTR(-ENOMEM); 284 } 285 286 static void free_trace_uprobe(struct trace_uprobe *tu) 287 { 288 int i; 289 290 for (i = 0; i < tu->tp.nr_args; i++) 291 traceprobe_free_probe_arg(&tu->tp.args[i]); 292 293 path_put(&tu->path); 294 kfree(tu->tp.call.class->system); 295 kfree(tu->tp.call.name); 296 kfree(tu->filename); 297 kfree(tu); 298 } 299 300 static struct trace_uprobe *find_probe_event(const char *event, const char *group) 301 { 302 struct trace_uprobe *tu; 303 304 list_for_each_entry(tu, &uprobe_list, list) 305 if (strcmp(trace_event_name(&tu->tp.call), event) == 0 && 306 strcmp(tu->tp.call.class->system, group) == 0) 307 return tu; 308 309 return NULL; 310 } 311 312 /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ 313 static int unregister_trace_uprobe(struct trace_uprobe *tu) 314 { 315 int ret; 316 317 ret = unregister_uprobe_event(tu); 318 if (ret) 319 return ret; 320 321 list_del(&tu->list); 322 free_trace_uprobe(tu); 323 return 0; 324 } 325 326 /* Register a trace_uprobe and probe_event */ 327 static int register_trace_uprobe(struct trace_uprobe *tu) 328 { 329 struct trace_uprobe *old_tu; 330 int ret; 331 332 mutex_lock(&uprobe_lock); 333 334 /* register as an event */ 335 old_tu = find_probe_event(trace_event_name(&tu->tp.call), 336 tu->tp.call.class->system); 337 if (old_tu) { 338 /* delete old event */ 339 ret = unregister_trace_uprobe(old_tu); 340 if (ret) 341 goto end; 342 } 343 344 ret = register_uprobe_event(tu); 345 if (ret) { 346 pr_warn("Failed to register probe event(%d)\n", ret); 347 goto end; 348 } 349 350 list_add_tail(&tu->list, &uprobe_list); 351 352 end: 353 mutex_unlock(&uprobe_lock); 354 355 return ret; 356 } 357 358 /* 359 * Argument syntax: 360 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] 361 * 362 * - Remove uprobe: -:[GRP/]EVENT 363 */ 364 static int create_trace_uprobe(int argc, char **argv) 365 { 366 struct trace_uprobe *tu; 367 char *arg, *event, *group, *filename; 368 char buf[MAX_EVENT_NAME_LEN]; 369 struct path path; 370 unsigned long offset; 371 bool is_delete, is_return; 372 int i, ret; 373 374 ret = 0; 375 is_delete = false; 376 is_return = false; 377 event = NULL; 378 group = NULL; 379 380 /* argc must be >= 1 */ 381 if (argv[0][0] == '-') 382 is_delete = true; 383 else if (argv[0][0] == 'r') 384 is_return = true; 385 else if (argv[0][0] != 'p') { 386 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n"); 387 return -EINVAL; 388 } 389 390 if (argv[0][1] == ':') { 391 event = &argv[0][2]; 392 arg = strchr(event, '/'); 393 394 if (arg) { 395 group = event; 396 event = arg + 1; 397 event[-1] = '\0'; 398 399 if (strlen(group) == 0) { 400 pr_info("Group name is not specified\n"); 401 return -EINVAL; 402 } 403 } 404 if (strlen(event) == 0) { 405 pr_info("Event name is not specified\n"); 406 return -EINVAL; 407 } 408 } 409 if (!group) 410 group = UPROBE_EVENT_SYSTEM; 411 412 if (is_delete) { 413 int ret; 414 415 if (!event) { 416 pr_info("Delete command needs an event name.\n"); 417 return -EINVAL; 418 } 419 mutex_lock(&uprobe_lock); 420 tu = find_probe_event(event, group); 421 422 if (!tu) { 423 mutex_unlock(&uprobe_lock); 424 pr_info("Event %s/%s doesn't exist.\n", group, event); 425 return -ENOENT; 426 } 427 /* delete an event */ 428 ret = unregister_trace_uprobe(tu); 429 mutex_unlock(&uprobe_lock); 430 return ret; 431 } 432 433 if (argc < 2) { 434 pr_info("Probe point is not specified.\n"); 435 return -EINVAL; 436 } 437 /* Find the last occurrence, in case the path contains ':' too. */ 438 arg = strrchr(argv[1], ':'); 439 if (!arg) 440 return -EINVAL; 441 442 *arg++ = '\0'; 443 filename = argv[1]; 444 ret = kern_path(filename, LOOKUP_FOLLOW, &path); 445 if (ret) 446 return ret; 447 448 if (!d_is_reg(path.dentry)) { 449 ret = -EINVAL; 450 goto fail_address_parse; 451 } 452 453 ret = kstrtoul(arg, 0, &offset); 454 if (ret) 455 goto fail_address_parse; 456 457 argc -= 2; 458 argv += 2; 459 460 /* setup a probe */ 461 if (!event) { 462 char *tail; 463 char *ptr; 464 465 tail = kstrdup(kbasename(filename), GFP_KERNEL); 466 if (!tail) { 467 ret = -ENOMEM; 468 goto fail_address_parse; 469 } 470 471 ptr = strpbrk(tail, ".-_"); 472 if (ptr) 473 *ptr = '\0'; 474 475 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset); 476 event = buf; 477 kfree(tail); 478 } 479 480 tu = alloc_trace_uprobe(group, event, argc, is_return); 481 if (IS_ERR(tu)) { 482 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu)); 483 ret = PTR_ERR(tu); 484 goto fail_address_parse; 485 } 486 tu->offset = offset; 487 tu->path = path; 488 tu->filename = kstrdup(filename, GFP_KERNEL); 489 490 if (!tu->filename) { 491 pr_info("Failed to allocate filename.\n"); 492 ret = -ENOMEM; 493 goto error; 494 } 495 496 /* parse arguments */ 497 ret = 0; 498 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 499 struct probe_arg *parg = &tu->tp.args[i]; 500 501 /* Increment count for freeing args in error case */ 502 tu->tp.nr_args++; 503 504 /* Parse argument name */ 505 arg = strchr(argv[i], '='); 506 if (arg) { 507 *arg++ = '\0'; 508 parg->name = kstrdup(argv[i], GFP_KERNEL); 509 } else { 510 arg = argv[i]; 511 /* If argument name is omitted, set "argN" */ 512 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); 513 parg->name = kstrdup(buf, GFP_KERNEL); 514 } 515 516 if (!parg->name) { 517 pr_info("Failed to allocate argument[%d] name.\n", i); 518 ret = -ENOMEM; 519 goto error; 520 } 521 522 if (!is_good_name(parg->name)) { 523 pr_info("Invalid argument[%d] name: %s\n", i, parg->name); 524 ret = -EINVAL; 525 goto error; 526 } 527 528 if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) { 529 pr_info("Argument[%d] name '%s' conflicts with " 530 "another field.\n", i, argv[i]); 531 ret = -EINVAL; 532 goto error; 533 } 534 535 /* Parse fetch argument */ 536 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, 537 is_return, false, 538 uprobes_fetch_type_table); 539 if (ret) { 540 pr_info("Parse error at argument[%d]. (%d)\n", i, ret); 541 goto error; 542 } 543 } 544 545 ret = register_trace_uprobe(tu); 546 if (ret) 547 goto error; 548 return 0; 549 550 error: 551 free_trace_uprobe(tu); 552 return ret; 553 554 fail_address_parse: 555 path_put(&path); 556 557 pr_info("Failed to parse address or file.\n"); 558 559 return ret; 560 } 561 562 static int cleanup_all_probes(void) 563 { 564 struct trace_uprobe *tu; 565 int ret = 0; 566 567 mutex_lock(&uprobe_lock); 568 while (!list_empty(&uprobe_list)) { 569 tu = list_entry(uprobe_list.next, struct trace_uprobe, list); 570 ret = unregister_trace_uprobe(tu); 571 if (ret) 572 break; 573 } 574 mutex_unlock(&uprobe_lock); 575 return ret; 576 } 577 578 /* Probes listing interfaces */ 579 static void *probes_seq_start(struct seq_file *m, loff_t *pos) 580 { 581 mutex_lock(&uprobe_lock); 582 return seq_list_start(&uprobe_list, *pos); 583 } 584 585 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) 586 { 587 return seq_list_next(v, &uprobe_list, pos); 588 } 589 590 static void probes_seq_stop(struct seq_file *m, void *v) 591 { 592 mutex_unlock(&uprobe_lock); 593 } 594 595 static int probes_seq_show(struct seq_file *m, void *v) 596 { 597 struct trace_uprobe *tu = v; 598 char c = is_ret_probe(tu) ? 'r' : 'p'; 599 int i; 600 601 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system, 602 trace_event_name(&tu->tp.call), tu->filename, 603 (int)(sizeof(void *) * 2), tu->offset); 604 605 for (i = 0; i < tu->tp.nr_args; i++) 606 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); 607 608 seq_putc(m, '\n'); 609 return 0; 610 } 611 612 static const struct seq_operations probes_seq_op = { 613 .start = probes_seq_start, 614 .next = probes_seq_next, 615 .stop = probes_seq_stop, 616 .show = probes_seq_show 617 }; 618 619 static int probes_open(struct inode *inode, struct file *file) 620 { 621 int ret; 622 623 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 624 ret = cleanup_all_probes(); 625 if (ret) 626 return ret; 627 } 628 629 return seq_open(file, &probes_seq_op); 630 } 631 632 static ssize_t probes_write(struct file *file, const char __user *buffer, 633 size_t count, loff_t *ppos) 634 { 635 return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe); 636 } 637 638 static const struct file_operations uprobe_events_ops = { 639 .owner = THIS_MODULE, 640 .open = probes_open, 641 .read = seq_read, 642 .llseek = seq_lseek, 643 .release = seq_release, 644 .write = probes_write, 645 }; 646 647 /* Probes profiling interfaces */ 648 static int probes_profile_seq_show(struct seq_file *m, void *v) 649 { 650 struct trace_uprobe *tu = v; 651 652 seq_printf(m, " %s %-44s %15lu\n", tu->filename, 653 trace_event_name(&tu->tp.call), tu->nhit); 654 return 0; 655 } 656 657 static const struct seq_operations profile_seq_op = { 658 .start = probes_seq_start, 659 .next = probes_seq_next, 660 .stop = probes_seq_stop, 661 .show = probes_profile_seq_show 662 }; 663 664 static int profile_open(struct inode *inode, struct file *file) 665 { 666 return seq_open(file, &profile_seq_op); 667 } 668 669 static const struct file_operations uprobe_profile_ops = { 670 .owner = THIS_MODULE, 671 .open = profile_open, 672 .read = seq_read, 673 .llseek = seq_lseek, 674 .release = seq_release, 675 }; 676 677 struct uprobe_cpu_buffer { 678 struct mutex mutex; 679 void *buf; 680 }; 681 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer; 682 static int uprobe_buffer_refcnt; 683 684 static int uprobe_buffer_init(void) 685 { 686 int cpu, err_cpu; 687 688 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer); 689 if (uprobe_cpu_buffer == NULL) 690 return -ENOMEM; 691 692 for_each_possible_cpu(cpu) { 693 struct page *p = alloc_pages_node(cpu_to_node(cpu), 694 GFP_KERNEL, 0); 695 if (p == NULL) { 696 err_cpu = cpu; 697 goto err; 698 } 699 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); 700 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); 701 } 702 703 return 0; 704 705 err: 706 for_each_possible_cpu(cpu) { 707 if (cpu == err_cpu) 708 break; 709 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); 710 } 711 712 free_percpu(uprobe_cpu_buffer); 713 return -ENOMEM; 714 } 715 716 static int uprobe_buffer_enable(void) 717 { 718 int ret = 0; 719 720 BUG_ON(!mutex_is_locked(&event_mutex)); 721 722 if (uprobe_buffer_refcnt++ == 0) { 723 ret = uprobe_buffer_init(); 724 if (ret < 0) 725 uprobe_buffer_refcnt--; 726 } 727 728 return ret; 729 } 730 731 static void uprobe_buffer_disable(void) 732 { 733 int cpu; 734 735 BUG_ON(!mutex_is_locked(&event_mutex)); 736 737 if (--uprobe_buffer_refcnt == 0) { 738 for_each_possible_cpu(cpu) 739 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, 740 cpu)->buf); 741 742 free_percpu(uprobe_cpu_buffer); 743 uprobe_cpu_buffer = NULL; 744 } 745 } 746 747 static struct uprobe_cpu_buffer *uprobe_buffer_get(void) 748 { 749 struct uprobe_cpu_buffer *ucb; 750 int cpu; 751 752 cpu = raw_smp_processor_id(); 753 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); 754 755 /* 756 * Use per-cpu buffers for fastest access, but we might migrate 757 * so the mutex makes sure we have sole access to it. 758 */ 759 mutex_lock(&ucb->mutex); 760 761 return ucb; 762 } 763 764 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) 765 { 766 mutex_unlock(&ucb->mutex); 767 } 768 769 static void __uprobe_trace_func(struct trace_uprobe *tu, 770 unsigned long func, struct pt_regs *regs, 771 struct uprobe_cpu_buffer *ucb, int dsize, 772 struct trace_event_file *trace_file) 773 { 774 struct uprobe_trace_entry_head *entry; 775 struct ring_buffer_event *event; 776 struct ring_buffer *buffer; 777 void *data; 778 int size, esize; 779 struct trace_event_call *call = &tu->tp.call; 780 781 WARN_ON(call != trace_file->event_call); 782 783 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) 784 return; 785 786 if (trace_trigger_soft_disabled(trace_file)) 787 return; 788 789 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 790 size = esize + tu->tp.size + dsize; 791 event = trace_event_buffer_lock_reserve(&buffer, trace_file, 792 call->event.type, size, 0, 0); 793 if (!event) 794 return; 795 796 entry = ring_buffer_event_data(event); 797 if (is_ret_probe(tu)) { 798 entry->vaddr[0] = func; 799 entry->vaddr[1] = instruction_pointer(regs); 800 data = DATAOF_TRACE_ENTRY(entry, true); 801 } else { 802 entry->vaddr[0] = instruction_pointer(regs); 803 data = DATAOF_TRACE_ENTRY(entry, false); 804 } 805 806 memcpy(data, ucb->buf, tu->tp.size + dsize); 807 808 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0); 809 } 810 811 /* uprobe handler */ 812 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, 813 struct uprobe_cpu_buffer *ucb, int dsize) 814 { 815 struct event_file_link *link; 816 817 if (is_ret_probe(tu)) 818 return 0; 819 820 rcu_read_lock(); 821 list_for_each_entry_rcu(link, &tu->tp.files, list) 822 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file); 823 rcu_read_unlock(); 824 825 return 0; 826 } 827 828 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, 829 struct pt_regs *regs, 830 struct uprobe_cpu_buffer *ucb, int dsize) 831 { 832 struct event_file_link *link; 833 834 rcu_read_lock(); 835 list_for_each_entry_rcu(link, &tu->tp.files, list) 836 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file); 837 rcu_read_unlock(); 838 } 839 840 /* Event entry printers */ 841 static enum print_line_t 842 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) 843 { 844 struct uprobe_trace_entry_head *entry; 845 struct trace_seq *s = &iter->seq; 846 struct trace_uprobe *tu; 847 u8 *data; 848 int i; 849 850 entry = (struct uprobe_trace_entry_head *)iter->ent; 851 tu = container_of(event, struct trace_uprobe, tp.call.event); 852 853 if (is_ret_probe(tu)) { 854 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", 855 trace_event_name(&tu->tp.call), 856 entry->vaddr[1], entry->vaddr[0]); 857 data = DATAOF_TRACE_ENTRY(entry, true); 858 } else { 859 trace_seq_printf(s, "%s: (0x%lx)", 860 trace_event_name(&tu->tp.call), 861 entry->vaddr[0]); 862 data = DATAOF_TRACE_ENTRY(entry, false); 863 } 864 865 for (i = 0; i < tu->tp.nr_args; i++) { 866 struct probe_arg *parg = &tu->tp.args[i]; 867 868 if (!parg->type->print(s, parg->name, data + parg->offset, entry)) 869 goto out; 870 } 871 872 trace_seq_putc(s, '\n'); 873 874 out: 875 return trace_handle_return(s); 876 } 877 878 typedef bool (*filter_func_t)(struct uprobe_consumer *self, 879 enum uprobe_filter_ctx ctx, 880 struct mm_struct *mm); 881 882 static int 883 probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, 884 filter_func_t filter) 885 { 886 bool enabled = trace_probe_is_enabled(&tu->tp); 887 struct event_file_link *link = NULL; 888 int ret; 889 890 if (file) { 891 if (tu->tp.flags & TP_FLAG_PROFILE) 892 return -EINTR; 893 894 link = kmalloc(sizeof(*link), GFP_KERNEL); 895 if (!link) 896 return -ENOMEM; 897 898 link->file = file; 899 list_add_tail_rcu(&link->list, &tu->tp.files); 900 901 tu->tp.flags |= TP_FLAG_TRACE; 902 } else { 903 if (tu->tp.flags & TP_FLAG_TRACE) 904 return -EINTR; 905 906 tu->tp.flags |= TP_FLAG_PROFILE; 907 } 908 909 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 910 911 if (enabled) 912 return 0; 913 914 ret = uprobe_buffer_enable(); 915 if (ret) 916 goto err_flags; 917 918 tu->consumer.filter = filter; 919 tu->inode = d_real_inode(tu->path.dentry); 920 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); 921 if (ret) 922 goto err_buffer; 923 924 return 0; 925 926 err_buffer: 927 uprobe_buffer_disable(); 928 929 err_flags: 930 if (file) { 931 list_del(&link->list); 932 kfree(link); 933 tu->tp.flags &= ~TP_FLAG_TRACE; 934 } else { 935 tu->tp.flags &= ~TP_FLAG_PROFILE; 936 } 937 return ret; 938 } 939 940 static void 941 probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file) 942 { 943 if (!trace_probe_is_enabled(&tu->tp)) 944 return; 945 946 if (file) { 947 struct event_file_link *link; 948 949 link = find_event_file_link(&tu->tp, file); 950 if (!link) 951 return; 952 953 list_del_rcu(&link->list); 954 /* synchronize with u{,ret}probe_trace_func */ 955 synchronize_sched(); 956 kfree(link); 957 958 if (!list_empty(&tu->tp.files)) 959 return; 960 } 961 962 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 963 964 uprobe_unregister(tu->inode, tu->offset, &tu->consumer); 965 tu->inode = NULL; 966 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; 967 968 uprobe_buffer_disable(); 969 } 970 971 static int uprobe_event_define_fields(struct trace_event_call *event_call) 972 { 973 int ret, i, size; 974 struct uprobe_trace_entry_head field; 975 struct trace_uprobe *tu = event_call->data; 976 977 if (is_ret_probe(tu)) { 978 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0); 979 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0); 980 size = SIZEOF_TRACE_ENTRY(true); 981 } else { 982 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0); 983 size = SIZEOF_TRACE_ENTRY(false); 984 } 985 /* Set argument names as fields */ 986 for (i = 0; i < tu->tp.nr_args; i++) { 987 struct probe_arg *parg = &tu->tp.args[i]; 988 989 ret = trace_define_field(event_call, parg->type->fmttype, 990 parg->name, size + parg->offset, 991 parg->type->size, parg->type->is_signed, 992 FILTER_OTHER); 993 994 if (ret) 995 return ret; 996 } 997 return 0; 998 } 999 1000 #ifdef CONFIG_PERF_EVENTS 1001 static bool 1002 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) 1003 { 1004 struct perf_event *event; 1005 1006 if (filter->nr_systemwide) 1007 return true; 1008 1009 list_for_each_entry(event, &filter->perf_events, hw.tp_list) { 1010 if (event->hw.target->mm == mm) 1011 return true; 1012 } 1013 1014 return false; 1015 } 1016 1017 static inline bool 1018 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) 1019 { 1020 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm); 1021 } 1022 1023 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) 1024 { 1025 bool done; 1026 1027 write_lock(&tu->filter.rwlock); 1028 if (event->hw.target) { 1029 list_del(&event->hw.tp_list); 1030 done = tu->filter.nr_systemwide || 1031 (event->hw.target->flags & PF_EXITING) || 1032 uprobe_filter_event(tu, event); 1033 } else { 1034 tu->filter.nr_systemwide--; 1035 done = tu->filter.nr_systemwide; 1036 } 1037 write_unlock(&tu->filter.rwlock); 1038 1039 if (!done) 1040 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); 1041 1042 return 0; 1043 } 1044 1045 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) 1046 { 1047 bool done; 1048 int err; 1049 1050 write_lock(&tu->filter.rwlock); 1051 if (event->hw.target) { 1052 /* 1053 * event->parent != NULL means copy_process(), we can avoid 1054 * uprobe_apply(). current->mm must be probed and we can rely 1055 * on dup_mmap() which preserves the already installed bp's. 1056 * 1057 * attr.enable_on_exec means that exec/mmap will install the 1058 * breakpoints we need. 1059 */ 1060 done = tu->filter.nr_systemwide || 1061 event->parent || event->attr.enable_on_exec || 1062 uprobe_filter_event(tu, event); 1063 list_add(&event->hw.tp_list, &tu->filter.perf_events); 1064 } else { 1065 done = tu->filter.nr_systemwide; 1066 tu->filter.nr_systemwide++; 1067 } 1068 write_unlock(&tu->filter.rwlock); 1069 1070 err = 0; 1071 if (!done) { 1072 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); 1073 if (err) 1074 uprobe_perf_close(tu, event); 1075 } 1076 return err; 1077 } 1078 1079 static bool uprobe_perf_filter(struct uprobe_consumer *uc, 1080 enum uprobe_filter_ctx ctx, struct mm_struct *mm) 1081 { 1082 struct trace_uprobe *tu; 1083 int ret; 1084 1085 tu = container_of(uc, struct trace_uprobe, consumer); 1086 read_lock(&tu->filter.rwlock); 1087 ret = __uprobe_perf_filter(&tu->filter, mm); 1088 read_unlock(&tu->filter.rwlock); 1089 1090 return ret; 1091 } 1092 1093 static void __uprobe_perf_func(struct trace_uprobe *tu, 1094 unsigned long func, struct pt_regs *regs, 1095 struct uprobe_cpu_buffer *ucb, int dsize) 1096 { 1097 struct trace_event_call *call = &tu->tp.call; 1098 struct uprobe_trace_entry_head *entry; 1099 struct hlist_head *head; 1100 void *data; 1101 int size, esize; 1102 int rctx; 1103 1104 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs)) 1105 return; 1106 1107 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 1108 1109 size = esize + tu->tp.size + dsize; 1110 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); 1111 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) 1112 return; 1113 1114 preempt_disable(); 1115 head = this_cpu_ptr(call->perf_events); 1116 if (hlist_empty(head)) 1117 goto out; 1118 1119 entry = perf_trace_buf_alloc(size, NULL, &rctx); 1120 if (!entry) 1121 goto out; 1122 1123 if (is_ret_probe(tu)) { 1124 entry->vaddr[0] = func; 1125 entry->vaddr[1] = instruction_pointer(regs); 1126 data = DATAOF_TRACE_ENTRY(entry, true); 1127 } else { 1128 entry->vaddr[0] = instruction_pointer(regs); 1129 data = DATAOF_TRACE_ENTRY(entry, false); 1130 } 1131 1132 memcpy(data, ucb->buf, tu->tp.size + dsize); 1133 1134 if (size - esize > tu->tp.size + dsize) { 1135 int len = tu->tp.size + dsize; 1136 1137 memset(data + len, 0, size - esize - len); 1138 } 1139 1140 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, 1141 head, NULL); 1142 out: 1143 preempt_enable(); 1144 } 1145 1146 /* uprobe profile handler */ 1147 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, 1148 struct uprobe_cpu_buffer *ucb, int dsize) 1149 { 1150 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) 1151 return UPROBE_HANDLER_REMOVE; 1152 1153 if (!is_ret_probe(tu)) 1154 __uprobe_perf_func(tu, 0, regs, ucb, dsize); 1155 return 0; 1156 } 1157 1158 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, 1159 struct pt_regs *regs, 1160 struct uprobe_cpu_buffer *ucb, int dsize) 1161 { 1162 __uprobe_perf_func(tu, func, regs, ucb, dsize); 1163 } 1164 #endif /* CONFIG_PERF_EVENTS */ 1165 1166 static int 1167 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, 1168 void *data) 1169 { 1170 struct trace_uprobe *tu = event->data; 1171 struct trace_event_file *file = data; 1172 1173 switch (type) { 1174 case TRACE_REG_REGISTER: 1175 return probe_event_enable(tu, file, NULL); 1176 1177 case TRACE_REG_UNREGISTER: 1178 probe_event_disable(tu, file); 1179 return 0; 1180 1181 #ifdef CONFIG_PERF_EVENTS 1182 case TRACE_REG_PERF_REGISTER: 1183 return probe_event_enable(tu, NULL, uprobe_perf_filter); 1184 1185 case TRACE_REG_PERF_UNREGISTER: 1186 probe_event_disable(tu, NULL); 1187 return 0; 1188 1189 case TRACE_REG_PERF_OPEN: 1190 return uprobe_perf_open(tu, data); 1191 1192 case TRACE_REG_PERF_CLOSE: 1193 return uprobe_perf_close(tu, data); 1194 1195 #endif 1196 default: 1197 return 0; 1198 } 1199 return 0; 1200 } 1201 1202 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) 1203 { 1204 struct trace_uprobe *tu; 1205 struct uprobe_dispatch_data udd; 1206 struct uprobe_cpu_buffer *ucb; 1207 int dsize, esize; 1208 int ret = 0; 1209 1210 1211 tu = container_of(con, struct trace_uprobe, consumer); 1212 tu->nhit++; 1213 1214 udd.tu = tu; 1215 udd.bp_addr = instruction_pointer(regs); 1216 1217 current->utask->vaddr = (unsigned long) &udd; 1218 1219 if (WARN_ON_ONCE(!uprobe_cpu_buffer)) 1220 return 0; 1221 1222 dsize = __get_data_size(&tu->tp, regs); 1223 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 1224 1225 ucb = uprobe_buffer_get(); 1226 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); 1227 1228 if (tu->tp.flags & TP_FLAG_TRACE) 1229 ret |= uprobe_trace_func(tu, regs, ucb, dsize); 1230 1231 #ifdef CONFIG_PERF_EVENTS 1232 if (tu->tp.flags & TP_FLAG_PROFILE) 1233 ret |= uprobe_perf_func(tu, regs, ucb, dsize); 1234 #endif 1235 uprobe_buffer_put(ucb); 1236 return ret; 1237 } 1238 1239 static int uretprobe_dispatcher(struct uprobe_consumer *con, 1240 unsigned long func, struct pt_regs *regs) 1241 { 1242 struct trace_uprobe *tu; 1243 struct uprobe_dispatch_data udd; 1244 struct uprobe_cpu_buffer *ucb; 1245 int dsize, esize; 1246 1247 tu = container_of(con, struct trace_uprobe, consumer); 1248 1249 udd.tu = tu; 1250 udd.bp_addr = func; 1251 1252 current->utask->vaddr = (unsigned long) &udd; 1253 1254 if (WARN_ON_ONCE(!uprobe_cpu_buffer)) 1255 return 0; 1256 1257 dsize = __get_data_size(&tu->tp, regs); 1258 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 1259 1260 ucb = uprobe_buffer_get(); 1261 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); 1262 1263 if (tu->tp.flags & TP_FLAG_TRACE) 1264 uretprobe_trace_func(tu, func, regs, ucb, dsize); 1265 1266 #ifdef CONFIG_PERF_EVENTS 1267 if (tu->tp.flags & TP_FLAG_PROFILE) 1268 uretprobe_perf_func(tu, func, regs, ucb, dsize); 1269 #endif 1270 uprobe_buffer_put(ucb); 1271 return 0; 1272 } 1273 1274 static struct trace_event_functions uprobe_funcs = { 1275 .trace = print_uprobe_event 1276 }; 1277 1278 static inline void init_trace_event_call(struct trace_uprobe *tu, 1279 struct trace_event_call *call) 1280 { 1281 INIT_LIST_HEAD(&call->class->fields); 1282 call->event.funcs = &uprobe_funcs; 1283 call->class->define_fields = uprobe_event_define_fields; 1284 1285 call->flags = TRACE_EVENT_FL_UPROBE; 1286 call->class->reg = trace_uprobe_register; 1287 call->data = tu; 1288 } 1289 1290 static int register_uprobe_event(struct trace_uprobe *tu) 1291 { 1292 struct trace_event_call *call = &tu->tp.call; 1293 int ret = 0; 1294 1295 init_trace_event_call(tu, call); 1296 1297 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) 1298 return -ENOMEM; 1299 1300 ret = register_trace_event(&call->event); 1301 if (!ret) { 1302 kfree(call->print_fmt); 1303 return -ENODEV; 1304 } 1305 1306 ret = trace_add_event_call(call); 1307 1308 if (ret) { 1309 pr_info("Failed to register uprobe event: %s\n", 1310 trace_event_name(call)); 1311 kfree(call->print_fmt); 1312 unregister_trace_event(&call->event); 1313 } 1314 1315 return ret; 1316 } 1317 1318 static int unregister_uprobe_event(struct trace_uprobe *tu) 1319 { 1320 int ret; 1321 1322 /* tu->event is unregistered in trace_remove_event_call() */ 1323 ret = trace_remove_event_call(&tu->tp.call); 1324 if (ret) 1325 return ret; 1326 kfree(tu->tp.call.print_fmt); 1327 tu->tp.call.print_fmt = NULL; 1328 return 0; 1329 } 1330 1331 #ifdef CONFIG_PERF_EVENTS 1332 struct trace_event_call * 1333 create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) 1334 { 1335 struct trace_uprobe *tu; 1336 struct path path; 1337 int ret; 1338 1339 ret = kern_path(name, LOOKUP_FOLLOW, &path); 1340 if (ret) 1341 return ERR_PTR(ret); 1342 1343 if (!d_is_reg(path.dentry)) { 1344 path_put(&path); 1345 return ERR_PTR(-EINVAL); 1346 } 1347 1348 /* 1349 * local trace_kprobes are not added to probe_list, so they are never 1350 * searched in find_trace_kprobe(). Therefore, there is no concern of 1351 * duplicated name "DUMMY_EVENT" here. 1352 */ 1353 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0, 1354 is_return); 1355 1356 if (IS_ERR(tu)) { 1357 pr_info("Failed to allocate trace_uprobe.(%d)\n", 1358 (int)PTR_ERR(tu)); 1359 path_put(&path); 1360 return ERR_CAST(tu); 1361 } 1362 1363 tu->offset = offs; 1364 tu->path = path; 1365 tu->filename = kstrdup(name, GFP_KERNEL); 1366 init_trace_event_call(tu, &tu->tp.call); 1367 1368 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) { 1369 ret = -ENOMEM; 1370 goto error; 1371 } 1372 1373 return &tu->tp.call; 1374 error: 1375 free_trace_uprobe(tu); 1376 return ERR_PTR(ret); 1377 } 1378 1379 void destroy_local_trace_uprobe(struct trace_event_call *event_call) 1380 { 1381 struct trace_uprobe *tu; 1382 1383 tu = container_of(event_call, struct trace_uprobe, tp.call); 1384 1385 kfree(tu->tp.call.print_fmt); 1386 tu->tp.call.print_fmt = NULL; 1387 1388 free_trace_uprobe(tu); 1389 } 1390 #endif /* CONFIG_PERF_EVENTS */ 1391 1392 /* Make a trace interface for controling probe points */ 1393 static __init int init_uprobe_trace(void) 1394 { 1395 struct dentry *d_tracer; 1396 1397 d_tracer = tracing_init_dentry(); 1398 if (IS_ERR(d_tracer)) 1399 return 0; 1400 1401 trace_create_file("uprobe_events", 0644, d_tracer, 1402 NULL, &uprobe_events_ops); 1403 /* Profile interface */ 1404 trace_create_file("uprobe_profile", 0444, d_tracer, 1405 NULL, &uprobe_profile_ops); 1406 return 0; 1407 } 1408 1409 fs_initcall(init_uprobe_trace); 1410