/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL
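/*
 * Worked example (an editorial illustration, not part of the original
 * source): on x86_64 the stack grows down, so the CONFIG_STACK_GROWSUP
 * branch above is not taken and sizeof(long) == 8. A "$stack2" fetch
 * argument therefore ends up in get_user_stack_nth(regs, 2), which
 * copies 8 bytes from userspace at user_stack_pointer(regs) + 2 * 8,
 * yielding 0 if the copy faults.
 */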
#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
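/*
 * Record layout sketch (editorial illustration): is_ret_probe() is what
 * makes SIZEOF_TRACE_ENTRY()/DATAOF_TRACE_ENTRY() above reserve one or
 * two vaddr slots ahead of the fetched arguments:
 *
 *   entry probe:  | trace_entry | vaddr[0] = IP   | args ...                     |
 *   return probe: | trace_entry | vaddr[0] = func | vaddr[1] = ret IP | args ... |
 */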
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
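/*
 * Concrete command sketches (editorial; hypothetical path, offset and
 * group name, not from the original source):
 *
 *   p:myuprobes/readline     /bin/bash:0x4245c0 %ip
 *   r:myuprobes/readline_ret /bin/bash:0x4245c0 $retval
 *   -:myuprobes/readline
 */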
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}
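	/*
	 * Worked naming example (editorial, hypothetical values): probing
	 * /bin/bash at offset 0x4245c0 without an explicit event name
	 * takes the basename "bash", truncates it at the first '.', '-'
	 * or '_' (a no-op here), and auto-generates "p_bash_0x4245c0".
	 */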
	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	/*
	 * Print the offset with a fixed width instead of "%p": %p would
	 * print "(null)" for offset 0 and may hash pointer values.
	 */
	seq_printf(m, " %s:0x%0*lx", tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
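/*
 * Example uprobe_profile line (editorial, illustrative values): the
 * format above yields "<file> <event name> <hit count>", e.g.
 *
 *   /bin/bash readline                                          127
 */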
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
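/*
 * Editorial note: the buffer pool above is refcounted under event_mutex;
 * the first enabled probe allocates one page per possible CPU and the
 * last disabled probe frees them, so a probe hit pays only for a per-CPU
 * lookup plus the short mutex hold below.
 */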
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * between CPUs, so the mutex makes sure we have sole access to
	 * the buffer.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}
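/*
 * Example rendered lines (editorial, illustrative addresses); for a
 * return probe the arrow reads "return site <- probed function":
 *
 *   entry probe:  readline: (0x4245c0) arg1=0x1
 *   return probe: readline_ret: (0x41adf0 <- 0x4245c0) arg1=0x1
 */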
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
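/*
 * Editorial note on the open/close pair below: they keep tu->filter in
 * sync with the set of attached perf events; "done" means the filter's
 * verdict for the affected mm cannot have changed, so no uprobe_apply()
 * pass over existing mappings is needed.
 */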
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	bool ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct bpf_prog *prog = call->prog;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}
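/*
 * Worked alignment example (editorial): the ALIGN() line above pads the
 * record so that the perf ring buffer's u32 size header plus the payload
 * stays u64-aligned. E.g. with esize == 16 and 13 bytes of argument
 * data, size is 29 and ALIGN(29 + 4, 8) - 4 == 36, since 4 + 36 == 40
 * is a multiple of 8.
 */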
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
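/*
 * Editorial cross-reference: the uprobe_dispatch_data stashed in
 * current->utask->vaddr by both dispatchers above is what
 * translate_user_vaddr() reads to turn an @+file_offset fetch argument
 * into a virtual address while the handlers run.
 */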
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);
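/*
 * Usage sketch (editorial, hypothetical binary and offset), driving the
 * files created above through the usual tracefs mount point:
 *
 *   # echo 'p:myuprobes/readline /bin/bash:0x4245c0' \
 *         >> /sys/kernel/debug/tracing/uprobe_events
 *   # echo 1 > /sys/kernel/debug/tracing/events/myuprobes/readline/enable
 *   # cat /sys/kernel/debug/tracing/uprobe_profile
 */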