1 /* 2 * Kprobes-based tracing events 3 * 4 * Created by Masami Hiramatsu <mhiramat@redhat.com> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 #include <linux/module.h> 21 #include <linux/uaccess.h> 22 #include <linux/kprobes.h> 23 #include <linux/seq_file.h> 24 #include <linux/slab.h> 25 #include <linux/smp.h> 26 #include <linux/debugfs.h> 27 #include <linux/types.h> 28 #include <linux/string.h> 29 #include <linux/ctype.h> 30 #include <linux/ptrace.h> 31 #include <linux/perf_event.h> 32 33 #include "trace.h" 34 #include "trace_output.h" 35 36 #define MAX_TRACE_ARGS 128 37 #define MAX_ARGSTR_LEN 63 38 #define MAX_EVENT_NAME_LEN 64 39 #define KPROBE_EVENT_SYSTEM "kprobes" 40 41 /* Reserved field names */ 42 #define FIELD_STRING_IP "__probe_ip" 43 #define FIELD_STRING_NARGS "__probe_nargs" 44 #define FIELD_STRING_RETIP "__probe_ret_ip" 45 #define FIELD_STRING_FUNC "__probe_func" 46 47 const char *reserved_field_names[] = { 48 "common_type", 49 "common_flags", 50 "common_preempt_count", 51 "common_pid", 52 "common_tgid", 53 "common_lock_depth", 54 FIELD_STRING_IP, 55 FIELD_STRING_NARGS, 56 FIELD_STRING_RETIP, 57 FIELD_STRING_FUNC, 58 }; 59 60 struct fetch_func { 61 unsigned long (*func)(struct pt_regs *, void *); 62 void *data; 63 }; 64 65 static __kprobes unsigned long call_fetch(struct fetch_func *f, 66 struct pt_regs *regs) 67 { 68 return f->func(regs, f->data); 69 } 70 71 /* fetch handlers */ 72 static __kprobes unsigned long fetch_register(struct pt_regs *regs, 73 void *offset) 74 { 75 return regs_get_register(regs, (unsigned int)((unsigned long)offset)); 76 } 77 78 static __kprobes unsigned long fetch_stack(struct pt_regs *regs, 79 void *num) 80 { 81 return regs_get_kernel_stack_nth(regs, 82 (unsigned int)((unsigned long)num)); 83 } 84 85 static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr) 86 { 87 unsigned long retval; 88 89 if (probe_kernel_address(addr, retval)) 90 return 0; 91 return retval; 92 } 93 94 static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num) 95 { 96 return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num)); 97 } 98 99 static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, 100 void *dummy) 101 { 102 return regs_return_value(regs); 103 } 104 105 static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs, 106 void *dummy) 107 { 108 return kernel_stack_pointer(regs); 109 } 110 111 /* Memory fetching by symbol */ 112 struct symbol_cache { 113 char *symbol; 114 long offset; 115 unsigned long addr; 116 }; 117 118 static unsigned long update_symbol_cache(struct symbol_cache *sc) 119 { 120 sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); 121 if (sc->addr) 122 sc->addr += sc->offset; 123 return sc->addr; 124 } 125 126 static void free_symbol_cache(struct symbol_cache *sc) 127 { 128 kfree(sc->symbol); 129 kfree(sc); 130 } 131 132 static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) 133 { 134 struct symbol_cache *sc; 135 136 if (!sym || strlen(sym) == 0) 137 return NULL; 138 sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); 139 if (!sc) 140 return NULL; 141 142 sc->symbol = kstrdup(sym, GFP_KERNEL); 143 if (!sc->symbol) { 144 kfree(sc); 145 return NULL; 146 } 147 sc->offset = offset; 148 149 update_symbol_cache(sc); 150 return sc; 151 } 152 153 static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data) 154 { 155 struct symbol_cache *sc = data; 156 157 if (sc->addr) 158 return fetch_memory(regs, (void *)sc->addr); 159 else 160 return 0; 161 } 162 163 /* Special indirect memory access interface */ 164 struct indirect_fetch_data { 165 struct fetch_func orig; 166 long offset; 167 }; 168 169 static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data) 170 { 171 struct indirect_fetch_data *ind = data; 172 unsigned long addr; 173 174 addr = call_fetch(&ind->orig, regs); 175 if (addr) { 176 addr += ind->offset; 177 return fetch_memory(regs, (void *)addr); 178 } else 179 return 0; 180 } 181 182 static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) 183 { 184 if (data->orig.func == fetch_indirect) 185 free_indirect_fetch_data(data->orig.data); 186 else if (data->orig.func == fetch_symbol) 187 free_symbol_cache(data->orig.data); 188 kfree(data); 189 } 190 191 /** 192 * Kprobe event core functions 193 */ 194 195 struct probe_arg { 196 struct fetch_func fetch; 197 const char *name; 198 }; 199 200 /* Flags for trace_probe */ 201 #define TP_FLAG_TRACE 1 202 #define TP_FLAG_PROFILE 2 203 204 struct trace_probe { 205 struct list_head list; 206 struct kretprobe rp; /* Use rp.kp for kprobe use */ 207 unsigned long nhit; 208 unsigned int flags; /* For TP_FLAG_* */ 209 const char *symbol; /* symbol name */ 210 struct ftrace_event_call call; 211 struct trace_event event; 212 unsigned int nr_args; 213 struct probe_arg args[]; 214 }; 215 216 #define SIZEOF_TRACE_PROBE(n) \ 217 (offsetof(struct trace_probe, args) + \ 218 (sizeof(struct probe_arg) * (n))) 219 220 static __kprobes int probe_is_return(struct trace_probe *tp) 221 { 222 return tp->rp.handler != NULL; 223 } 224 225 static __kprobes const char *probe_symbol(struct trace_probe *tp) 226 { 227 return tp->symbol ? tp->symbol : "unknown"; 228 } 229 230 static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) 231 { 232 int ret = -EINVAL; 233 234 if (ff->func == fetch_argument) 235 ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data); 236 else if (ff->func == fetch_register) { 237 const char *name; 238 name = regs_query_register_name((unsigned int)((long)ff->data)); 239 ret = snprintf(buf, n, "%%%s", name); 240 } else if (ff->func == fetch_stack) 241 ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data); 242 else if (ff->func == fetch_memory) 243 ret = snprintf(buf, n, "@0x%p", ff->data); 244 else if (ff->func == fetch_symbol) { 245 struct symbol_cache *sc = ff->data; 246 if (sc->offset) 247 ret = snprintf(buf, n, "@%s%+ld", sc->symbol, 248 sc->offset); 249 else 250 ret = snprintf(buf, n, "@%s", sc->symbol); 251 } else if (ff->func == fetch_retvalue) 252 ret = snprintf(buf, n, "$retval"); 253 else if (ff->func == fetch_stack_address) 254 ret = snprintf(buf, n, "$stack"); 255 else if (ff->func == fetch_indirect) { 256 struct indirect_fetch_data *id = ff->data; 257 size_t l = 0; 258 ret = snprintf(buf, n, "%+ld(", id->offset); 259 if (ret >= n) 260 goto end; 261 l += ret; 262 ret = probe_arg_string(buf + l, n - l, &id->orig); 263 if (ret < 0) 264 goto end; 265 l += ret; 266 ret = snprintf(buf + l, n - l, ")"); 267 ret += l; 268 } 269 end: 270 if (ret >= n) 271 return -ENOSPC; 272 return ret; 273 } 274 275 static int register_probe_event(struct trace_probe *tp); 276 static void unregister_probe_event(struct trace_probe *tp); 277 278 static DEFINE_MUTEX(probe_lock); 279 static LIST_HEAD(probe_list); 280 281 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); 282 static int kretprobe_dispatcher(struct kretprobe_instance *ri, 283 struct pt_regs *regs); 284 285 /* Check the name is good for event/group */ 286 static int check_event_name(const char *name) 287 { 288 if (!isalpha(*name) && *name != '_') 289 return 0; 290 while (*++name != '\0') { 291 if (!isalpha(*name) && !isdigit(*name) && *name != '_') 292 return 0; 293 } 294 return 1; 295 } 296 297 /* 298 * Allocate new trace_probe and initialize it (including kprobes). 299 */ 300 static struct trace_probe *alloc_trace_probe(const char *group, 301 const char *event, 302 void *addr, 303 const char *symbol, 304 unsigned long offs, 305 int nargs, int is_return) 306 { 307 struct trace_probe *tp; 308 int ret = -ENOMEM; 309 310 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); 311 if (!tp) 312 return ERR_PTR(ret); 313 314 if (symbol) { 315 tp->symbol = kstrdup(symbol, GFP_KERNEL); 316 if (!tp->symbol) 317 goto error; 318 tp->rp.kp.symbol_name = tp->symbol; 319 tp->rp.kp.offset = offs; 320 } else 321 tp->rp.kp.addr = addr; 322 323 if (is_return) 324 tp->rp.handler = kretprobe_dispatcher; 325 else 326 tp->rp.kp.pre_handler = kprobe_dispatcher; 327 328 if (!event || !check_event_name(event)) { 329 ret = -EINVAL; 330 goto error; 331 } 332 333 tp->call.name = kstrdup(event, GFP_KERNEL); 334 if (!tp->call.name) 335 goto error; 336 337 if (!group || !check_event_name(group)) { 338 ret = -EINVAL; 339 goto error; 340 } 341 342 tp->call.system = kstrdup(group, GFP_KERNEL); 343 if (!tp->call.system) 344 goto error; 345 346 INIT_LIST_HEAD(&tp->list); 347 return tp; 348 error: 349 kfree(tp->call.name); 350 kfree(tp->symbol); 351 kfree(tp); 352 return ERR_PTR(ret); 353 } 354 355 static void free_probe_arg(struct probe_arg *arg) 356 { 357 if (arg->fetch.func == fetch_symbol) 358 free_symbol_cache(arg->fetch.data); 359 else if (arg->fetch.func == fetch_indirect) 360 free_indirect_fetch_data(arg->fetch.data); 361 kfree(arg->name); 362 } 363 364 static void free_trace_probe(struct trace_probe *tp) 365 { 366 int i; 367 368 for (i = 0; i < tp->nr_args; i++) 369 free_probe_arg(&tp->args[i]); 370 371 kfree(tp->call.system); 372 kfree(tp->call.name); 373 kfree(tp->symbol); 374 kfree(tp); 375 } 376 377 static struct trace_probe *find_probe_event(const char *event, 378 const char *group) 379 { 380 struct trace_probe *tp; 381 382 list_for_each_entry(tp, &probe_list, list) 383 if (strcmp(tp->call.name, event) == 0 && 384 strcmp(tp->call.system, group) == 0) 385 return tp; 386 return NULL; 387 } 388 389 /* Unregister a trace_probe and probe_event: call with locking probe_lock */ 390 static void unregister_trace_probe(struct trace_probe *tp) 391 { 392 if (probe_is_return(tp)) 393 unregister_kretprobe(&tp->rp); 394 else 395 unregister_kprobe(&tp->rp.kp); 396 list_del(&tp->list); 397 unregister_probe_event(tp); 398 } 399 400 /* Register a trace_probe and probe_event */ 401 static int register_trace_probe(struct trace_probe *tp) 402 { 403 struct trace_probe *old_tp; 404 int ret; 405 406 mutex_lock(&probe_lock); 407 408 /* register as an event */ 409 old_tp = find_probe_event(tp->call.name, tp->call.system); 410 if (old_tp) { 411 /* delete old event */ 412 unregister_trace_probe(old_tp); 413 free_trace_probe(old_tp); 414 } 415 ret = register_probe_event(tp); 416 if (ret) { 417 pr_warning("Faild to register probe event(%d)\n", ret); 418 goto end; 419 } 420 421 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; 422 if (probe_is_return(tp)) 423 ret = register_kretprobe(&tp->rp); 424 else 425 ret = register_kprobe(&tp->rp.kp); 426 427 if (ret) { 428 pr_warning("Could not insert probe(%d)\n", ret); 429 if (ret == -EILSEQ) { 430 pr_warning("Probing address(0x%p) is not an " 431 "instruction boundary.\n", 432 tp->rp.kp.addr); 433 ret = -EINVAL; 434 } 435 unregister_probe_event(tp); 436 } else 437 list_add_tail(&tp->list, &probe_list); 438 end: 439 mutex_unlock(&probe_lock); 440 return ret; 441 } 442 443 /* Split symbol and offset. */ 444 static int split_symbol_offset(char *symbol, unsigned long *offset) 445 { 446 char *tmp; 447 int ret; 448 449 if (!offset) 450 return -EINVAL; 451 452 tmp = strchr(symbol, '+'); 453 if (tmp) { 454 /* skip sign because strict_strtol doesn't accept '+' */ 455 ret = strict_strtoul(tmp + 1, 0, offset); 456 if (ret) 457 return ret; 458 *tmp = '\0'; 459 } else 460 *offset = 0; 461 return 0; 462 } 463 464 #define PARAM_MAX_ARGS 16 465 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) 466 467 static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) 468 { 469 int ret = 0; 470 unsigned long param; 471 472 if (strcmp(arg, "retval") == 0) { 473 if (is_return) { 474 ff->func = fetch_retvalue; 475 ff->data = NULL; 476 } else 477 ret = -EINVAL; 478 } else if (strncmp(arg, "stack", 5) == 0) { 479 if (arg[5] == '\0') { 480 ff->func = fetch_stack_address; 481 ff->data = NULL; 482 } else if (isdigit(arg[5])) { 483 ret = strict_strtoul(arg + 5, 10, ¶m); 484 if (ret || param > PARAM_MAX_STACK) 485 ret = -EINVAL; 486 else { 487 ff->func = fetch_stack; 488 ff->data = (void *)param; 489 } 490 } else 491 ret = -EINVAL; 492 } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) { 493 ret = strict_strtoul(arg + 3, 10, ¶m); 494 if (ret || param > PARAM_MAX_ARGS) 495 ret = -EINVAL; 496 else { 497 ff->func = fetch_argument; 498 ff->data = (void *)param; 499 } 500 } else 501 ret = -EINVAL; 502 return ret; 503 } 504 505 /* Recursive argument parser */ 506 static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) 507 { 508 int ret = 0; 509 unsigned long param; 510 long offset; 511 char *tmp; 512 513 switch (arg[0]) { 514 case '$': 515 ret = parse_probe_vars(arg + 1, ff, is_return); 516 break; 517 case '%': /* named register */ 518 ret = regs_query_register_offset(arg + 1); 519 if (ret >= 0) { 520 ff->func = fetch_register; 521 ff->data = (void *)(unsigned long)ret; 522 ret = 0; 523 } 524 break; 525 case '@': /* memory or symbol */ 526 if (isdigit(arg[1])) { 527 ret = strict_strtoul(arg + 1, 0, ¶m); 528 if (ret) 529 break; 530 ff->func = fetch_memory; 531 ff->data = (void *)param; 532 } else { 533 ret = split_symbol_offset(arg + 1, &offset); 534 if (ret) 535 break; 536 ff->data = alloc_symbol_cache(arg + 1, offset); 537 if (ff->data) 538 ff->func = fetch_symbol; 539 else 540 ret = -EINVAL; 541 } 542 break; 543 case '+': /* indirect memory */ 544 case '-': 545 tmp = strchr(arg, '('); 546 if (!tmp) { 547 ret = -EINVAL; 548 break; 549 } 550 *tmp = '\0'; 551 ret = strict_strtol(arg + 1, 0, &offset); 552 if (ret) 553 break; 554 if (arg[0] == '-') 555 offset = -offset; 556 arg = tmp + 1; 557 tmp = strrchr(arg, ')'); 558 if (tmp) { 559 struct indirect_fetch_data *id; 560 *tmp = '\0'; 561 id = kzalloc(sizeof(struct indirect_fetch_data), 562 GFP_KERNEL); 563 if (!id) 564 return -ENOMEM; 565 id->offset = offset; 566 ret = __parse_probe_arg(arg, &id->orig, is_return); 567 if (ret) 568 kfree(id); 569 else { 570 ff->func = fetch_indirect; 571 ff->data = (void *)id; 572 } 573 } else 574 ret = -EINVAL; 575 break; 576 default: 577 /* TODO: support custom handler */ 578 ret = -EINVAL; 579 } 580 return ret; 581 } 582 583 /* String length checking wrapper */ 584 static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) 585 { 586 if (strlen(arg) > MAX_ARGSTR_LEN) { 587 pr_info("Argument is too long.: %s\n", arg); 588 return -ENOSPC; 589 } 590 return __parse_probe_arg(arg, ff, is_return); 591 } 592 593 /* Return 1 if name is reserved or already used by another argument */ 594 static int conflict_field_name(const char *name, 595 struct probe_arg *args, int narg) 596 { 597 int i; 598 for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++) 599 if (strcmp(reserved_field_names[i], name) == 0) 600 return 1; 601 for (i = 0; i < narg; i++) 602 if (strcmp(args[i].name, name) == 0) 603 return 1; 604 return 0; 605 } 606 607 static int create_trace_probe(int argc, char **argv) 608 { 609 /* 610 * Argument syntax: 611 * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] 612 * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] 613 * Fetch args: 614 * $argN : fetch Nth of function argument. (N:0-) 615 * $retval : fetch return value 616 * $stack : fetch stack address 617 * $stackN : fetch Nth of stack (N:0-) 618 * @ADDR : fetch memory at ADDR (ADDR should be in kernel) 619 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) 620 * %REG : fetch register REG 621 * Indirect memory fetch: 622 * +|-offs(ARG) : fetch memory at ARG +|- offs address. 623 * Alias name of args: 624 * NAME=FETCHARG : set NAME as alias of FETCHARG. 625 */ 626 struct trace_probe *tp; 627 int i, ret = 0; 628 int is_return = 0, is_delete = 0; 629 char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL; 630 unsigned long offset = 0; 631 void *addr = NULL; 632 char buf[MAX_EVENT_NAME_LEN]; 633 634 /* argc must be >= 1 */ 635 if (argv[0][0] == 'p') 636 is_return = 0; 637 else if (argv[0][0] == 'r') 638 is_return = 1; 639 else if (argv[0][0] == '-') 640 is_delete = 1; 641 else { 642 pr_info("Probe definition must be started with 'p', 'r' or" 643 " '-'.\n"); 644 return -EINVAL; 645 } 646 647 if (argv[0][1] == ':') { 648 event = &argv[0][2]; 649 if (strchr(event, '/')) { 650 group = event; 651 event = strchr(group, '/') + 1; 652 event[-1] = '\0'; 653 if (strlen(group) == 0) { 654 pr_info("Group name is not specifiled\n"); 655 return -EINVAL; 656 } 657 } 658 if (strlen(event) == 0) { 659 pr_info("Event name is not specifiled\n"); 660 return -EINVAL; 661 } 662 } 663 if (!group) 664 group = KPROBE_EVENT_SYSTEM; 665 666 if (is_delete) { 667 if (!event) { 668 pr_info("Delete command needs an event name.\n"); 669 return -EINVAL; 670 } 671 tp = find_probe_event(event, group); 672 if (!tp) { 673 pr_info("Event %s/%s doesn't exist.\n", group, event); 674 return -ENOENT; 675 } 676 /* delete an event */ 677 unregister_trace_probe(tp); 678 free_trace_probe(tp); 679 return 0; 680 } 681 682 if (argc < 2) { 683 pr_info("Probe point is not specified.\n"); 684 return -EINVAL; 685 } 686 if (isdigit(argv[1][0])) { 687 if (is_return) { 688 pr_info("Return probe point must be a symbol.\n"); 689 return -EINVAL; 690 } 691 /* an address specified */ 692 ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); 693 if (ret) { 694 pr_info("Failed to parse address.\n"); 695 return ret; 696 } 697 } else { 698 /* a symbol specified */ 699 symbol = argv[1]; 700 /* TODO: support .init module functions */ 701 ret = split_symbol_offset(symbol, &offset); 702 if (ret) { 703 pr_info("Failed to parse symbol.\n"); 704 return ret; 705 } 706 if (offset && is_return) { 707 pr_info("Return probe must be used without offset.\n"); 708 return -EINVAL; 709 } 710 } 711 argc -= 2; argv += 2; 712 713 /* setup a probe */ 714 if (!event) { 715 /* Make a new event name */ 716 if (symbol) 717 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld", 718 is_return ? 'r' : 'p', symbol, offset); 719 else 720 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", 721 is_return ? 'r' : 'p', addr); 722 event = buf; 723 } 724 tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, 725 is_return); 726 if (IS_ERR(tp)) { 727 pr_info("Failed to allocate trace_probe.(%d)\n", 728 (int)PTR_ERR(tp)); 729 return PTR_ERR(tp); 730 } 731 732 /* parse arguments */ 733 ret = 0; 734 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 735 /* Parse argument name */ 736 arg = strchr(argv[i], '='); 737 if (arg) 738 *arg++ = '\0'; 739 else 740 arg = argv[i]; 741 742 if (conflict_field_name(argv[i], tp->args, i)) { 743 pr_info("Argument%d name '%s' conflicts with " 744 "another field.\n", i, argv[i]); 745 ret = -EINVAL; 746 goto error; 747 } 748 749 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); 750 if (!tp->args[i].name) { 751 pr_info("Failed to allocate argument%d name '%s'.\n", 752 i, argv[i]); 753 ret = -ENOMEM; 754 goto error; 755 } 756 757 /* Parse fetch argument */ 758 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); 759 if (ret) { 760 pr_info("Parse error at argument%d. (%d)\n", i, ret); 761 kfree(tp->args[i].name); 762 goto error; 763 } 764 765 tp->nr_args++; 766 } 767 768 ret = register_trace_probe(tp); 769 if (ret) 770 goto error; 771 return 0; 772 773 error: 774 free_trace_probe(tp); 775 return ret; 776 } 777 778 static void cleanup_all_probes(void) 779 { 780 struct trace_probe *tp; 781 782 mutex_lock(&probe_lock); 783 /* TODO: Use batch unregistration */ 784 while (!list_empty(&probe_list)) { 785 tp = list_entry(probe_list.next, struct trace_probe, list); 786 unregister_trace_probe(tp); 787 free_trace_probe(tp); 788 } 789 mutex_unlock(&probe_lock); 790 } 791 792 793 /* Probes listing interfaces */ 794 static void *probes_seq_start(struct seq_file *m, loff_t *pos) 795 { 796 mutex_lock(&probe_lock); 797 return seq_list_start(&probe_list, *pos); 798 } 799 800 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) 801 { 802 return seq_list_next(v, &probe_list, pos); 803 } 804 805 static void probes_seq_stop(struct seq_file *m, void *v) 806 { 807 mutex_unlock(&probe_lock); 808 } 809 810 static int probes_seq_show(struct seq_file *m, void *v) 811 { 812 struct trace_probe *tp = v; 813 int i, ret; 814 char buf[MAX_ARGSTR_LEN + 1]; 815 816 seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); 817 seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); 818 819 if (!tp->symbol) 820 seq_printf(m, " 0x%p", tp->rp.kp.addr); 821 else if (tp->rp.kp.offset) 822 seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); 823 else 824 seq_printf(m, " %s", probe_symbol(tp)); 825 826 for (i = 0; i < tp->nr_args; i++) { 827 ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch); 828 if (ret < 0) { 829 pr_warning("Argument%d decoding error(%d).\n", i, ret); 830 return ret; 831 } 832 seq_printf(m, " %s=%s", tp->args[i].name, buf); 833 } 834 seq_printf(m, "\n"); 835 return 0; 836 } 837 838 static const struct seq_operations probes_seq_op = { 839 .start = probes_seq_start, 840 .next = probes_seq_next, 841 .stop = probes_seq_stop, 842 .show = probes_seq_show 843 }; 844 845 static int probes_open(struct inode *inode, struct file *file) 846 { 847 if ((file->f_mode & FMODE_WRITE) && 848 (file->f_flags & O_TRUNC)) 849 cleanup_all_probes(); 850 851 return seq_open(file, &probes_seq_op); 852 } 853 854 static int command_trace_probe(const char *buf) 855 { 856 char **argv; 857 int argc = 0, ret = 0; 858 859 argv = argv_split(GFP_KERNEL, buf, &argc); 860 if (!argv) 861 return -ENOMEM; 862 863 if (argc) 864 ret = create_trace_probe(argc, argv); 865 866 argv_free(argv); 867 return ret; 868 } 869 870 #define WRITE_BUFSIZE 128 871 872 static ssize_t probes_write(struct file *file, const char __user *buffer, 873 size_t count, loff_t *ppos) 874 { 875 char *kbuf, *tmp; 876 int ret; 877 size_t done; 878 size_t size; 879 880 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); 881 if (!kbuf) 882 return -ENOMEM; 883 884 ret = done = 0; 885 while (done < count) { 886 size = count - done; 887 if (size >= WRITE_BUFSIZE) 888 size = WRITE_BUFSIZE - 1; 889 if (copy_from_user(kbuf, buffer + done, size)) { 890 ret = -EFAULT; 891 goto out; 892 } 893 kbuf[size] = '\0'; 894 tmp = strchr(kbuf, '\n'); 895 if (tmp) { 896 *tmp = '\0'; 897 size = tmp - kbuf + 1; 898 } else if (done + size < count) { 899 pr_warning("Line length is too long: " 900 "Should be less than %d.", WRITE_BUFSIZE); 901 ret = -EINVAL; 902 goto out; 903 } 904 done += size; 905 /* Remove comments */ 906 tmp = strchr(kbuf, '#'); 907 if (tmp) 908 *tmp = '\0'; 909 910 ret = command_trace_probe(kbuf); 911 if (ret) 912 goto out; 913 } 914 ret = done; 915 out: 916 kfree(kbuf); 917 return ret; 918 } 919 920 static const struct file_operations kprobe_events_ops = { 921 .owner = THIS_MODULE, 922 .open = probes_open, 923 .read = seq_read, 924 .llseek = seq_lseek, 925 .release = seq_release, 926 .write = probes_write, 927 }; 928 929 /* Probes profiling interfaces */ 930 static int probes_profile_seq_show(struct seq_file *m, void *v) 931 { 932 struct trace_probe *tp = v; 933 934 seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, 935 tp->rp.kp.nmissed); 936 937 return 0; 938 } 939 940 static const struct seq_operations profile_seq_op = { 941 .start = probes_seq_start, 942 .next = probes_seq_next, 943 .stop = probes_seq_stop, 944 .show = probes_profile_seq_show 945 }; 946 947 static int profile_open(struct inode *inode, struct file *file) 948 { 949 return seq_open(file, &profile_seq_op); 950 } 951 952 static const struct file_operations kprobe_profile_ops = { 953 .owner = THIS_MODULE, 954 .open = profile_open, 955 .read = seq_read, 956 .llseek = seq_lseek, 957 .release = seq_release, 958 }; 959 960 /* Kprobe handler */ 961 static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) 962 { 963 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 964 struct kprobe_trace_entry *entry; 965 struct ring_buffer_event *event; 966 struct ring_buffer *buffer; 967 int size, i, pc; 968 unsigned long irq_flags; 969 struct ftrace_event_call *call = &tp->call; 970 971 tp->nhit++; 972 973 local_save_flags(irq_flags); 974 pc = preempt_count(); 975 976 size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); 977 978 event = trace_current_buffer_lock_reserve(&buffer, call->id, size, 979 irq_flags, pc); 980 if (!event) 981 return 0; 982 983 entry = ring_buffer_event_data(event); 984 entry->nargs = tp->nr_args; 985 entry->ip = (unsigned long)kp->addr; 986 for (i = 0; i < tp->nr_args; i++) 987 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 988 989 if (!filter_current_check_discard(buffer, call, entry, event)) 990 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); 991 return 0; 992 } 993 994 /* Kretprobe handler */ 995 static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, 996 struct pt_regs *regs) 997 { 998 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 999 struct kretprobe_trace_entry *entry; 1000 struct ring_buffer_event *event; 1001 struct ring_buffer *buffer; 1002 int size, i, pc; 1003 unsigned long irq_flags; 1004 struct ftrace_event_call *call = &tp->call; 1005 1006 local_save_flags(irq_flags); 1007 pc = preempt_count(); 1008 1009 size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); 1010 1011 event = trace_current_buffer_lock_reserve(&buffer, call->id, size, 1012 irq_flags, pc); 1013 if (!event) 1014 return 0; 1015 1016 entry = ring_buffer_event_data(event); 1017 entry->nargs = tp->nr_args; 1018 entry->func = (unsigned long)tp->rp.kp.addr; 1019 entry->ret_ip = (unsigned long)ri->ret_addr; 1020 for (i = 0; i < tp->nr_args; i++) 1021 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1022 1023 if (!filter_current_check_discard(buffer, call, entry, event)) 1024 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); 1025 1026 return 0; 1027 } 1028 1029 /* Event entry printers */ 1030 enum print_line_t 1031 print_kprobe_event(struct trace_iterator *iter, int flags) 1032 { 1033 struct kprobe_trace_entry *field; 1034 struct trace_seq *s = &iter->seq; 1035 struct trace_event *event; 1036 struct trace_probe *tp; 1037 int i; 1038 1039 field = (struct kprobe_trace_entry *)iter->ent; 1040 event = ftrace_find_event(field->ent.type); 1041 tp = container_of(event, struct trace_probe, event); 1042 1043 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1044 goto partial; 1045 1046 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) 1047 goto partial; 1048 1049 if (!trace_seq_puts(s, ")")) 1050 goto partial; 1051 1052 for (i = 0; i < field->nargs; i++) 1053 if (!trace_seq_printf(s, " %s=%lx", 1054 tp->args[i].name, field->args[i])) 1055 goto partial; 1056 1057 if (!trace_seq_puts(s, "\n")) 1058 goto partial; 1059 1060 return TRACE_TYPE_HANDLED; 1061 partial: 1062 return TRACE_TYPE_PARTIAL_LINE; 1063 } 1064 1065 enum print_line_t 1066 print_kretprobe_event(struct trace_iterator *iter, int flags) 1067 { 1068 struct kretprobe_trace_entry *field; 1069 struct trace_seq *s = &iter->seq; 1070 struct trace_event *event; 1071 struct trace_probe *tp; 1072 int i; 1073 1074 field = (struct kretprobe_trace_entry *)iter->ent; 1075 event = ftrace_find_event(field->ent.type); 1076 tp = container_of(event, struct trace_probe, event); 1077 1078 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1079 goto partial; 1080 1081 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) 1082 goto partial; 1083 1084 if (!trace_seq_puts(s, " <- ")) 1085 goto partial; 1086 1087 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) 1088 goto partial; 1089 1090 if (!trace_seq_puts(s, ")")) 1091 goto partial; 1092 1093 for (i = 0; i < field->nargs; i++) 1094 if (!trace_seq_printf(s, " %s=%lx", 1095 tp->args[i].name, field->args[i])) 1096 goto partial; 1097 1098 if (!trace_seq_puts(s, "\n")) 1099 goto partial; 1100 1101 return TRACE_TYPE_HANDLED; 1102 partial: 1103 return TRACE_TYPE_PARTIAL_LINE; 1104 } 1105 1106 static int probe_event_enable(struct ftrace_event_call *call) 1107 { 1108 struct trace_probe *tp = (struct trace_probe *)call->data; 1109 1110 tp->flags |= TP_FLAG_TRACE; 1111 if (probe_is_return(tp)) 1112 return enable_kretprobe(&tp->rp); 1113 else 1114 return enable_kprobe(&tp->rp.kp); 1115 } 1116 1117 static void probe_event_disable(struct ftrace_event_call *call) 1118 { 1119 struct trace_probe *tp = (struct trace_probe *)call->data; 1120 1121 tp->flags &= ~TP_FLAG_TRACE; 1122 if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) { 1123 if (probe_is_return(tp)) 1124 disable_kretprobe(&tp->rp); 1125 else 1126 disable_kprobe(&tp->rp.kp); 1127 } 1128 } 1129 1130 static int probe_event_raw_init(struct ftrace_event_call *event_call) 1131 { 1132 INIT_LIST_HEAD(&event_call->fields); 1133 1134 return 0; 1135 } 1136 1137 #undef DEFINE_FIELD 1138 #define DEFINE_FIELD(type, item, name, is_signed) \ 1139 do { \ 1140 ret = trace_define_field(event_call, #type, name, \ 1141 offsetof(typeof(field), item), \ 1142 sizeof(field.item), is_signed, \ 1143 FILTER_OTHER); \ 1144 if (ret) \ 1145 return ret; \ 1146 } while (0) 1147 1148 static int kprobe_event_define_fields(struct ftrace_event_call *event_call) 1149 { 1150 int ret, i; 1151 struct kprobe_trace_entry field; 1152 struct trace_probe *tp = (struct trace_probe *)event_call->data; 1153 1154 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); 1155 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); 1156 /* Set argument names as fields */ 1157 for (i = 0; i < tp->nr_args; i++) 1158 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); 1159 return 0; 1160 } 1161 1162 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) 1163 { 1164 int ret, i; 1165 struct kretprobe_trace_entry field; 1166 struct trace_probe *tp = (struct trace_probe *)event_call->data; 1167 1168 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); 1169 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); 1170 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); 1171 /* Set argument names as fields */ 1172 for (i = 0; i < tp->nr_args; i++) 1173 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); 1174 return 0; 1175 } 1176 1177 static int __probe_event_show_format(struct trace_seq *s, 1178 struct trace_probe *tp, const char *fmt, 1179 const char *arg) 1180 { 1181 int i; 1182 1183 /* Show format */ 1184 if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt)) 1185 return 0; 1186 1187 for (i = 0; i < tp->nr_args; i++) 1188 if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name)) 1189 return 0; 1190 1191 if (!trace_seq_printf(s, "\", %s", arg)) 1192 return 0; 1193 1194 for (i = 0; i < tp->nr_args; i++) 1195 if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name)) 1196 return 0; 1197 1198 return trace_seq_puts(s, "\n"); 1199 } 1200 1201 #undef SHOW_FIELD 1202 #define SHOW_FIELD(type, item, name) \ 1203 do { \ 1204 ret = trace_seq_printf(s, "\tfield:" #type " %s;\t" \ 1205 "offset:%u;\tsize:%u;\tsigned:%d;\n", name,\ 1206 (unsigned int)offsetof(typeof(field), item),\ 1207 (unsigned int)sizeof(type), \ 1208 is_signed_type(type)); \ 1209 if (!ret) \ 1210 return 0; \ 1211 } while (0) 1212 1213 static int kprobe_event_show_format(struct ftrace_event_call *call, 1214 struct trace_seq *s) 1215 { 1216 struct kprobe_trace_entry field __attribute__((unused)); 1217 int ret, i; 1218 struct trace_probe *tp = (struct trace_probe *)call->data; 1219 1220 SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP); 1221 SHOW_FIELD(int, nargs, FIELD_STRING_NARGS); 1222 1223 /* Show fields */ 1224 for (i = 0; i < tp->nr_args; i++) 1225 SHOW_FIELD(unsigned long, args[i], tp->args[i].name); 1226 trace_seq_puts(s, "\n"); 1227 1228 return __probe_event_show_format(s, tp, "(%lx)", 1229 "REC->" FIELD_STRING_IP); 1230 } 1231 1232 static int kretprobe_event_show_format(struct ftrace_event_call *call, 1233 struct trace_seq *s) 1234 { 1235 struct kretprobe_trace_entry field __attribute__((unused)); 1236 int ret, i; 1237 struct trace_probe *tp = (struct trace_probe *)call->data; 1238 1239 SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC); 1240 SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP); 1241 SHOW_FIELD(int, nargs, FIELD_STRING_NARGS); 1242 1243 /* Show fields */ 1244 for (i = 0; i < tp->nr_args; i++) 1245 SHOW_FIELD(unsigned long, args[i], tp->args[i].name); 1246 trace_seq_puts(s, "\n"); 1247 1248 return __probe_event_show_format(s, tp, "(%lx <- %lx)", 1249 "REC->" FIELD_STRING_FUNC 1250 ", REC->" FIELD_STRING_RETIP); 1251 } 1252 1253 #ifdef CONFIG_EVENT_PROFILE 1254 1255 /* Kprobe profile handler */ 1256 static __kprobes int kprobe_profile_func(struct kprobe *kp, 1257 struct pt_regs *regs) 1258 { 1259 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1260 struct ftrace_event_call *call = &tp->call; 1261 struct kprobe_trace_entry *entry; 1262 struct trace_entry *ent; 1263 int size, __size, i, pc, __cpu; 1264 unsigned long irq_flags; 1265 char *trace_buf; 1266 char *raw_data; 1267 int rctx; 1268 1269 pc = preempt_count(); 1270 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); 1271 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1272 size -= sizeof(u32); 1273 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1274 "profile buffer not large enough")) 1275 return 0; 1276 1277 /* 1278 * Protect the non nmi buffer 1279 * This also protects the rcu read side 1280 */ 1281 local_irq_save(irq_flags); 1282 1283 rctx = perf_swevent_get_recursion_context(); 1284 if (rctx < 0) 1285 goto end_recursion; 1286 1287 __cpu = smp_processor_id(); 1288 1289 if (in_nmi()) 1290 trace_buf = rcu_dereference(perf_trace_buf_nmi); 1291 else 1292 trace_buf = rcu_dereference(perf_trace_buf); 1293 1294 if (!trace_buf) 1295 goto end; 1296 1297 raw_data = per_cpu_ptr(trace_buf, __cpu); 1298 1299 /* Zero dead bytes from alignment to avoid buffer leak to userspace */ 1300 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 1301 entry = (struct kprobe_trace_entry *)raw_data; 1302 ent = &entry->ent; 1303 1304 tracing_generic_entry_update(ent, irq_flags, pc); 1305 ent->type = call->id; 1306 entry->nargs = tp->nr_args; 1307 entry->ip = (unsigned long)kp->addr; 1308 for (i = 0; i < tp->nr_args; i++) 1309 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1310 perf_tp_event(call->id, entry->ip, 1, entry, size); 1311 1312 end: 1313 perf_swevent_put_recursion_context(rctx); 1314 end_recursion: 1315 local_irq_restore(irq_flags); 1316 1317 return 0; 1318 } 1319 1320 /* Kretprobe profile handler */ 1321 static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, 1322 struct pt_regs *regs) 1323 { 1324 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1325 struct ftrace_event_call *call = &tp->call; 1326 struct kretprobe_trace_entry *entry; 1327 struct trace_entry *ent; 1328 int size, __size, i, pc, __cpu; 1329 unsigned long irq_flags; 1330 char *trace_buf; 1331 char *raw_data; 1332 int rctx; 1333 1334 pc = preempt_count(); 1335 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); 1336 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1337 size -= sizeof(u32); 1338 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1339 "profile buffer not large enough")) 1340 return 0; 1341 1342 /* 1343 * Protect the non nmi buffer 1344 * This also protects the rcu read side 1345 */ 1346 local_irq_save(irq_flags); 1347 1348 rctx = perf_swevent_get_recursion_context(); 1349 if (rctx < 0) 1350 goto end_recursion; 1351 1352 __cpu = smp_processor_id(); 1353 1354 if (in_nmi()) 1355 trace_buf = rcu_dereference(perf_trace_buf_nmi); 1356 else 1357 trace_buf = rcu_dereference(perf_trace_buf); 1358 1359 if (!trace_buf) 1360 goto end; 1361 1362 raw_data = per_cpu_ptr(trace_buf, __cpu); 1363 1364 /* Zero dead bytes from alignment to avoid buffer leak to userspace */ 1365 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 1366 entry = (struct kretprobe_trace_entry *)raw_data; 1367 ent = &entry->ent; 1368 1369 tracing_generic_entry_update(ent, irq_flags, pc); 1370 ent->type = call->id; 1371 entry->nargs = tp->nr_args; 1372 entry->func = (unsigned long)tp->rp.kp.addr; 1373 entry->ret_ip = (unsigned long)ri->ret_addr; 1374 for (i = 0; i < tp->nr_args; i++) 1375 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1376 perf_tp_event(call->id, entry->ret_ip, 1, entry, size); 1377 1378 end: 1379 perf_swevent_put_recursion_context(rctx); 1380 end_recursion: 1381 local_irq_restore(irq_flags); 1382 1383 return 0; 1384 } 1385 1386 static int probe_profile_enable(struct ftrace_event_call *call) 1387 { 1388 struct trace_probe *tp = (struct trace_probe *)call->data; 1389 1390 tp->flags |= TP_FLAG_PROFILE; 1391 1392 if (probe_is_return(tp)) 1393 return enable_kretprobe(&tp->rp); 1394 else 1395 return enable_kprobe(&tp->rp.kp); 1396 } 1397 1398 static void probe_profile_disable(struct ftrace_event_call *call) 1399 { 1400 struct trace_probe *tp = (struct trace_probe *)call->data; 1401 1402 tp->flags &= ~TP_FLAG_PROFILE; 1403 1404 if (!(tp->flags & TP_FLAG_TRACE)) { 1405 if (probe_is_return(tp)) 1406 disable_kretprobe(&tp->rp); 1407 else 1408 disable_kprobe(&tp->rp.kp); 1409 } 1410 } 1411 #endif /* CONFIG_EVENT_PROFILE */ 1412 1413 1414 static __kprobes 1415 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) 1416 { 1417 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1418 1419 if (tp->flags & TP_FLAG_TRACE) 1420 kprobe_trace_func(kp, regs); 1421 #ifdef CONFIG_EVENT_PROFILE 1422 if (tp->flags & TP_FLAG_PROFILE) 1423 kprobe_profile_func(kp, regs); 1424 #endif /* CONFIG_EVENT_PROFILE */ 1425 return 0; /* We don't tweek kernel, so just return 0 */ 1426 } 1427 1428 static __kprobes 1429 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) 1430 { 1431 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1432 1433 if (tp->flags & TP_FLAG_TRACE) 1434 kretprobe_trace_func(ri, regs); 1435 #ifdef CONFIG_EVENT_PROFILE 1436 if (tp->flags & TP_FLAG_PROFILE) 1437 kretprobe_profile_func(ri, regs); 1438 #endif /* CONFIG_EVENT_PROFILE */ 1439 return 0; /* We don't tweek kernel, so just return 0 */ 1440 } 1441 1442 static int register_probe_event(struct trace_probe *tp) 1443 { 1444 struct ftrace_event_call *call = &tp->call; 1445 int ret; 1446 1447 /* Initialize ftrace_event_call */ 1448 if (probe_is_return(tp)) { 1449 tp->event.trace = print_kretprobe_event; 1450 call->raw_init = probe_event_raw_init; 1451 call->show_format = kretprobe_event_show_format; 1452 call->define_fields = kretprobe_event_define_fields; 1453 } else { 1454 tp->event.trace = print_kprobe_event; 1455 call->raw_init = probe_event_raw_init; 1456 call->show_format = kprobe_event_show_format; 1457 call->define_fields = kprobe_event_define_fields; 1458 } 1459 call->event = &tp->event; 1460 call->id = register_ftrace_event(&tp->event); 1461 if (!call->id) 1462 return -ENODEV; 1463 call->enabled = 0; 1464 call->regfunc = probe_event_enable; 1465 call->unregfunc = probe_event_disable; 1466 1467 #ifdef CONFIG_EVENT_PROFILE 1468 call->profile_enable = probe_profile_enable; 1469 call->profile_disable = probe_profile_disable; 1470 #endif 1471 call->data = tp; 1472 ret = trace_add_event_call(call); 1473 if (ret) { 1474 pr_info("Failed to register kprobe event: %s\n", call->name); 1475 unregister_ftrace_event(&tp->event); 1476 } 1477 return ret; 1478 } 1479 1480 static void unregister_probe_event(struct trace_probe *tp) 1481 { 1482 /* tp->event is unregistered in trace_remove_event_call() */ 1483 trace_remove_event_call(&tp->call); 1484 } 1485 1486 /* Make a debugfs interface for controling probe points */ 1487 static __init int init_kprobe_trace(void) 1488 { 1489 struct dentry *d_tracer; 1490 struct dentry *entry; 1491 1492 d_tracer = tracing_init_dentry(); 1493 if (!d_tracer) 1494 return 0; 1495 1496 entry = debugfs_create_file("kprobe_events", 0644, d_tracer, 1497 NULL, &kprobe_events_ops); 1498 1499 /* Event list interface */ 1500 if (!entry) 1501 pr_warning("Could not create debugfs " 1502 "'kprobe_events' entry\n"); 1503 1504 /* Profile interface */ 1505 entry = debugfs_create_file("kprobe_profile", 0444, d_tracer, 1506 NULL, &kprobe_profile_ops); 1507 1508 if (!entry) 1509 pr_warning("Could not create debugfs " 1510 "'kprobe_profile' entry\n"); 1511 return 0; 1512 } 1513 fs_initcall(init_kprobe_trace); 1514 1515 1516 #ifdef CONFIG_FTRACE_STARTUP_TEST 1517 1518 static int kprobe_trace_selftest_target(int a1, int a2, int a3, 1519 int a4, int a5, int a6) 1520 { 1521 return a1 + a2 + a3 + a4 + a5 + a6; 1522 } 1523 1524 static __init int kprobe_trace_self_tests_init(void) 1525 { 1526 int ret; 1527 int (*target)(int, int, int, int, int, int); 1528 1529 target = kprobe_trace_selftest_target; 1530 1531 pr_info("Testing kprobe tracing: "); 1532 1533 ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " 1534 "$arg1 $arg2 $arg3 $arg4 $stack $stack0"); 1535 if (WARN_ON_ONCE(ret)) 1536 pr_warning("error enabling function entry\n"); 1537 1538 ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " 1539 "$retval"); 1540 if (WARN_ON_ONCE(ret)) 1541 pr_warning("error enabling function return\n"); 1542 1543 ret = target(1, 2, 3, 4, 5, 6); 1544 1545 cleanup_all_probes(); 1546 1547 pr_cont("OK\n"); 1548 return 0; 1549 } 1550 1551 late_initcall(kprobe_trace_self_tests_init); 1552 1553 #endif 1554