/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL
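
/*
 * Illustrative note (added, not in the original source): a fetch argument
 * written as "$stackN" reaches FETCH_FUNC_NAME(stack, type) with N passed
 * in via the "offset" cookie, so the value read is the word N longs away
 * from user_stack_pointer(regs), in whichever direction the stack grows.
 */
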
#define DEFINE_FETCH_memory(type)					\
static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
						void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}
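
/*
 * Added note (not in the original source): the u32 at *dest above is the
 * usual "__data_loc" cookie used by dynamic probe events.  The caller packs
 * the maximum length and the offset of the string data relative to the
 * entry into it, and the fetch function rewrites it with the length that
 * was actually copied (get_rloc_len()/get_rloc_offs()/make_data_rloc() do
 * the packing), so a failed copy is reported as an empty string at the
 * same offset.
 */
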
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\
					void *offset, void *dest)	\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
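
/*
 * Example usage (added for illustration only; the path, offset and names
 * are made up):
 *
 *   # add an entry probe named uprobes/my_probe with one register argument
 *   echo 'p:my_probe /bin/bash:0x4245c0 arg1=%ax' > uprobe_events
 *   # remove it again
 *   echo '-:my_probe' > uprobe_events
 *
 * Writes to the uprobe_events file created below are parsed by
 * create_trace_uprobe().
 */
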
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(path.dentry->d_inode);
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	if (inode)
		iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}
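
/*
 * Added comment (not in the original source): cleanup_all_probes() backs
 * the "echo > uprobe_events" style truncation; probes_open() below calls
 * it when the file is opened for writing with O_TRUNC.
 */
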
static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			ftrace_event_name(&tu->tp.call));
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			ftrace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
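
/*
 * Added comment (not in the original source): each possible CPU gets one
 * page of scratch space for the fetched argument data; the mutex is taken
 * in uprobe_buffer_get()/uprobe_buffer_put() because the handler may be
 * migrated while it still owns the buffer it picked.
 */
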
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
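
/*
 * Added comment (not in the original source): each record written below is
 * a uprobe_trace_entry_head followed by the argument data staged in the
 * per-cpu buffer.  A return probe stores the probed function address in
 * vaddr[0] and the address being returned to in vaddr[1]; an entry probe
 * stores only the instruction pointer in vaddr[0].  SIZEOF_TRACE_ENTRY()
 * and DATAOF_TRACE_ENTRY() describe this layout.
 */
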
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct ftrace_event_file *ftrace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct ftrace_event_call *call = &tu->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
					ftrace_event_name(&tu->tp.call),
					entry->vaddr[1], entry->vaddr[0]))
			goto partial;
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		if (!trace_seq_printf(s, "%s: (0x%lx)",
					ftrace_event_name(&tu->tp.call),
					entry->vaddr[0]))
			goto partial;
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto partial;
	}

	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
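
/*
 * Added comment (not in the original source): probe_event_enable() is
 * called both from the ftrace side (file != NULL, sets TP_FLAG_TRACE) and
 * from perf (file == NULL, sets TP_FLAG_PROFILE); the uprobe itself is
 * registered only on the transition from fully disabled to enabled, and
 * probe_event_disable() undoes the matching half.
 */
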
static int
probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else
		tu->tp.flags |= TP_FLAG_PROFILE;

	ret = uprobe_buffer_enable();
	if (ret < 0)
		return ret;

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret) {
		if (file) {
			list_del(&link->list);
			kfree(link);
			tu->tp.flags &= ~TP_FLAG_TRACE;
		} else
			tu->tp.flags &= ~TP_FLAG_PROFILE;
	}

	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.tp_target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
}
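
/*
 * Added comment (not in the original source): uprobe_perf_open() and
 * uprobe_perf_close() track which perf events (or how many system-wide
 * ones) are interested in this probe; when the first interested event
 * shows up or the last one goes away, uprobe_apply() installs or removes
 * the breakpoints for the affected mm's.
 */
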
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);

	return 0;
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.tp_target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct ftrace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */
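
/*
 * Added comment (not in the original source): trace_uprobe_register() is
 * installed as call->class->reg in register_uprobe_event() below and
 * multiplexes ftrace and perf (un)register/open/close requests onto
 * probe_event_enable()/probe_event_disable() and the perf filter hooks.
 */
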
static int
trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

#ifdef CONFIG_PERF_EVENTS
	if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
	    !uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;
#endif

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct ftrace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			ftrace_event_name(call));
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);