/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/**
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;		/* node in probe_list */
	struct kretprobe	rp;		/* Use rp.kp for kprobe use */
	unsigned long		nhit;		/* hit counter, bumped by dispatchers */
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

/* Node linking one ftrace_event_file ("trace" consumer) into tp.files */
struct event_file_link {
	struct ftrace_event_file	*file;
	struct list_head		list;
};

/* Size of a trace_kprobe with n probe arguments (args is a trailing array) */
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))


/* A return probe was requested iff a kretprobe handler was installed */
static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

/* True if the probed code has gone away (e.g. its module was unloaded) */
static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

/* True if the probe symbol is of the form "<mod->name>:<symbol>" */
static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

/* True if the probe symbol carries any "MOD:" module prefix */
static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

/* probe_lock serializes probe_list and all (un)registration paths */
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;	/* symbol name to resolve */
	long		offset;		/* offset applied to the resolved address */
	unsigned long	addr;		/* cached address; 0 when unresolved */
};

/*
 * Re-resolve the cached symbol via kallsyms and re-apply the offset.
 * Returns the new address, or 0 if the symbol cannot be resolved.
 */
unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

/*
 * Allocate a symbol_cache for @sym+@offset and resolve it once.
 * Returns NULL on allocation failure or if @sym is NULL/empty.
 */
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type) \ 140 static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ 141 void *offset, void *dest) \ 142 { \ 143 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ 144 (unsigned int)((unsigned long)offset)); \ 145 } 146 DEFINE_BASIC_FETCH_FUNCS(stack) 147 /* No string on the stack entry */ 148 #define fetch_stack_string NULL 149 #define fetch_stack_string_size NULL 150 151 #define DEFINE_FETCH_memory(type) \ 152 static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ 153 void *addr, void *dest) \ 154 { \ 155 type retval; \ 156 if (probe_kernel_address(addr, retval)) \ 157 *(type *)dest = 0; \ 158 else \ 159 *(type *)dest = retval; \ 160 } 161 DEFINE_BASIC_FETCH_FUNCS(memory) 162 /* 163 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max 164 * length and relative data location. 165 */ 166 static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, 167 void *addr, void *dest) 168 { 169 long ret; 170 int maxlen = get_rloc_len(*(u32 *)dest); 171 u8 *dst = get_rloc_data(dest); 172 u8 *src = addr; 173 mm_segment_t old_fs = get_fs(); 174 175 if (!maxlen) 176 return; 177 178 /* 179 * Try to get string again, since the string can be changed while 180 * probing. 
181 */ 182 set_fs(KERNEL_DS); 183 pagefault_disable(); 184 185 do 186 ret = __copy_from_user_inatomic(dst++, src++, 1); 187 while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); 188 189 dst[-1] = '\0'; 190 pagefault_enable(); 191 set_fs(old_fs); 192 193 if (ret < 0) { /* Failed to fetch string */ 194 ((u8 *)get_rloc_data(dest))[0] = '\0'; 195 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); 196 } else { 197 *(u32 *)dest = make_data_rloc(src - (u8 *)addr, 198 get_rloc_offs(*(u32 *)dest)); 199 } 200 } 201 202 /* Return the length of string -- including null terminal byte */ 203 static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, 204 void *addr, void *dest) 205 { 206 mm_segment_t old_fs; 207 int ret, len = 0; 208 u8 c; 209 210 old_fs = get_fs(); 211 set_fs(KERNEL_DS); 212 pagefault_disable(); 213 214 do { 215 ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); 216 len++; 217 } while (c && ret == 0 && len < MAX_STRING_SIZE); 218 219 pagefault_enable(); 220 set_fs(old_fs); 221 222 if (ret < 0) /* Failed to check the length */ 223 *(u32 *)dest = 0; 224 else 225 *(u32 *)dest = len; 226 } 227 228 #define DEFINE_FETCH_symbol(type) \ 229 __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \ 230 void *data, void *dest) \ 231 { \ 232 struct symbol_cache *sc = data; \ 233 if (sc->addr) \ 234 fetch_memory_##type(regs, (void *)sc->addr, dest); \ 235 else \ 236 *(type *)dest = 0; \ 237 } 238 DEFINE_BASIC_FETCH_FUNCS(symbol) 239 DEFINE_FETCH_symbol(string) 240 DEFINE_FETCH_symbol(string_size) 241 242 /* kprobes don't support file_offset fetch methods */ 243 #define fetch_file_offset_u8 NULL 244 #define fetch_file_offset_u16 NULL 245 #define fetch_file_offset_u32 NULL 246 #define fetch_file_offset_u64 NULL 247 #define fetch_file_offset_string NULL 248 #define fetch_file_offset_string_size NULL 249 250 /* Fetch type information table */ 251 const struct fetch_type kprobes_fetch_type_table[] = { 252 /* Special types 
*/ 253 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, 254 sizeof(u32), 1, "__data_loc char[]"), 255 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, 256 string_size, sizeof(u32), 0, "u32"), 257 /* Basic types */ 258 ASSIGN_FETCH_TYPE(u8, u8, 0), 259 ASSIGN_FETCH_TYPE(u16, u16, 0), 260 ASSIGN_FETCH_TYPE(u32, u32, 0), 261 ASSIGN_FETCH_TYPE(u64, u64, 0), 262 ASSIGN_FETCH_TYPE(s8, u8, 1), 263 ASSIGN_FETCH_TYPE(s16, u16, 1), 264 ASSIGN_FETCH_TYPE(s32, u32, 1), 265 ASSIGN_FETCH_TYPE(s64, u64, 1), 266 267 ASSIGN_FETCH_TYPE_END 268 }; 269 270 /* 271 * Allocate new trace_probe and initialize it (including kprobes). 272 */ 273 static struct trace_kprobe *alloc_trace_kprobe(const char *group, 274 const char *event, 275 void *addr, 276 const char *symbol, 277 unsigned long offs, 278 int nargs, bool is_return) 279 { 280 struct trace_kprobe *tk; 281 int ret = -ENOMEM; 282 283 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL); 284 if (!tk) 285 return ERR_PTR(ret); 286 287 if (symbol) { 288 tk->symbol = kstrdup(symbol, GFP_KERNEL); 289 if (!tk->symbol) 290 goto error; 291 tk->rp.kp.symbol_name = tk->symbol; 292 tk->rp.kp.offset = offs; 293 } else 294 tk->rp.kp.addr = addr; 295 296 if (is_return) 297 tk->rp.handler = kretprobe_dispatcher; 298 else 299 tk->rp.kp.pre_handler = kprobe_dispatcher; 300 301 if (!event || !is_good_name(event)) { 302 ret = -EINVAL; 303 goto error; 304 } 305 306 tk->tp.call.class = &tk->tp.class; 307 tk->tp.call.name = kstrdup(event, GFP_KERNEL); 308 if (!tk->tp.call.name) 309 goto error; 310 311 if (!group || !is_good_name(group)) { 312 ret = -EINVAL; 313 goto error; 314 } 315 316 tk->tp.class.system = kstrdup(group, GFP_KERNEL); 317 if (!tk->tp.class.system) 318 goto error; 319 320 INIT_LIST_HEAD(&tk->list); 321 INIT_LIST_HEAD(&tk->tp.files); 322 return tk; 323 error: 324 kfree(tk->tp.call.name); 325 kfree(tk->symbol); 326 kfree(tk); 327 return ERR_PTR(ret); 328 } 329 330 static void free_trace_kprobe(struct 
trace_kprobe *tk) 331 { 332 int i; 333 334 for (i = 0; i < tk->tp.nr_args; i++) 335 traceprobe_free_probe_arg(&tk->tp.args[i]); 336 337 kfree(tk->tp.call.class->system); 338 kfree(tk->tp.call.name); 339 kfree(tk->symbol); 340 kfree(tk); 341 } 342 343 static struct trace_kprobe *find_trace_kprobe(const char *event, 344 const char *group) 345 { 346 struct trace_kprobe *tk; 347 348 list_for_each_entry(tk, &probe_list, list) 349 if (strcmp(tk->tp.call.name, event) == 0 && 350 strcmp(tk->tp.call.class->system, group) == 0) 351 return tk; 352 return NULL; 353 } 354 355 /* 356 * Enable trace_probe 357 * if the file is NULL, enable "perf" handler, or enable "trace" handler. 358 */ 359 static int 360 enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) 361 { 362 int ret = 0; 363 364 if (file) { 365 struct event_file_link *link; 366 367 link = kmalloc(sizeof(*link), GFP_KERNEL); 368 if (!link) { 369 ret = -ENOMEM; 370 goto out; 371 } 372 373 link->file = file; 374 list_add_tail_rcu(&link->list, &tk->tp.files); 375 376 tk->tp.flags |= TP_FLAG_TRACE; 377 } else 378 tk->tp.flags |= TP_FLAG_PROFILE; 379 380 if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) { 381 if (trace_kprobe_is_return(tk)) 382 ret = enable_kretprobe(&tk->rp); 383 else 384 ret = enable_kprobe(&tk->rp.kp); 385 } 386 out: 387 return ret; 388 } 389 390 static struct event_file_link * 391 find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) 392 { 393 struct event_file_link *link; 394 395 list_for_each_entry(link, &tp->files, list) 396 if (link->file == file) 397 return link; 398 399 return NULL; 400 } 401 402 /* 403 * Disable trace_probe 404 * if the file is NULL, disable "perf" handler, or disable "trace" handler. 
405 */ 406 static int 407 disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) 408 { 409 struct event_file_link *link = NULL; 410 int wait = 0; 411 int ret = 0; 412 413 if (file) { 414 link = find_event_file_link(&tk->tp, file); 415 if (!link) { 416 ret = -EINVAL; 417 goto out; 418 } 419 420 list_del_rcu(&link->list); 421 wait = 1; 422 if (!list_empty(&tk->tp.files)) 423 goto out; 424 425 tk->tp.flags &= ~TP_FLAG_TRACE; 426 } else 427 tk->tp.flags &= ~TP_FLAG_PROFILE; 428 429 if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) { 430 if (trace_kprobe_is_return(tk)) 431 disable_kretprobe(&tk->rp); 432 else 433 disable_kprobe(&tk->rp.kp); 434 wait = 1; 435 } 436 out: 437 if (wait) { 438 /* 439 * Synchronize with kprobe_trace_func/kretprobe_trace_func 440 * to ensure disabled (all running handlers are finished). 441 * This is not only for kfree(), but also the caller, 442 * trace_remove_event_call() supposes it for releasing 443 * event_call related objects, which will be accessed in 444 * the kprobe_trace_func/kretprobe_trace_func. 
445 */ 446 synchronize_sched(); 447 kfree(link); /* Ignored if link == NULL */ 448 } 449 450 return ret; 451 } 452 453 /* Internal register function - just handle k*probes and flags */ 454 static int __register_trace_kprobe(struct trace_kprobe *tk) 455 { 456 int i, ret; 457 458 if (trace_probe_is_registered(&tk->tp)) 459 return -EINVAL; 460 461 for (i = 0; i < tk->tp.nr_args; i++) 462 traceprobe_update_arg(&tk->tp.args[i]); 463 464 /* Set/clear disabled flag according to tp->flag */ 465 if (trace_probe_is_enabled(&tk->tp)) 466 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; 467 else 468 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED; 469 470 if (trace_kprobe_is_return(tk)) 471 ret = register_kretprobe(&tk->rp); 472 else 473 ret = register_kprobe(&tk->rp.kp); 474 475 if (ret == 0) 476 tk->tp.flags |= TP_FLAG_REGISTERED; 477 else { 478 pr_warning("Could not insert probe at %s+%lu: %d\n", 479 trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); 480 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { 481 pr_warning("This probe might be able to register after" 482 "target module is loaded. 
Continue.\n"); 483 ret = 0; 484 } else if (ret == -EILSEQ) { 485 pr_warning("Probing address(0x%p) is not an " 486 "instruction boundary.\n", 487 tk->rp.kp.addr); 488 ret = -EINVAL; 489 } 490 } 491 492 return ret; 493 } 494 495 /* Internal unregister function - just handle k*probes and flags */ 496 static void __unregister_trace_kprobe(struct trace_kprobe *tk) 497 { 498 if (trace_probe_is_registered(&tk->tp)) { 499 if (trace_kprobe_is_return(tk)) 500 unregister_kretprobe(&tk->rp); 501 else 502 unregister_kprobe(&tk->rp.kp); 503 tk->tp.flags &= ~TP_FLAG_REGISTERED; 504 /* Cleanup kprobe for reuse */ 505 if (tk->rp.kp.symbol_name) 506 tk->rp.kp.addr = NULL; 507 } 508 } 509 510 /* Unregister a trace_probe and probe_event: call with locking probe_lock */ 511 static int unregister_trace_kprobe(struct trace_kprobe *tk) 512 { 513 /* Enabled event can not be unregistered */ 514 if (trace_probe_is_enabled(&tk->tp)) 515 return -EBUSY; 516 517 /* Will fail if probe is being used by ftrace or perf */ 518 if (unregister_kprobe_event(tk)) 519 return -EBUSY; 520 521 __unregister_trace_kprobe(tk); 522 list_del(&tk->list); 523 524 return 0; 525 } 526 527 /* Register a trace_probe and probe_event */ 528 static int register_trace_kprobe(struct trace_kprobe *tk) 529 { 530 struct trace_kprobe *old_tk; 531 int ret; 532 533 mutex_lock(&probe_lock); 534 535 /* Delete old (same name) event if exist */ 536 old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); 537 if (old_tk) { 538 ret = unregister_trace_kprobe(old_tk); 539 if (ret < 0) 540 goto end; 541 free_trace_kprobe(old_tk); 542 } 543 544 /* Register new event */ 545 ret = register_kprobe_event(tk); 546 if (ret) { 547 pr_warning("Failed to register probe event(%d)\n", ret); 548 goto end; 549 } 550 551 /* Register k*probe */ 552 ret = __register_trace_kprobe(tk); 553 if (ret < 0) 554 unregister_kprobe_event(tk); 555 else 556 list_add_tail(&tk->list, &probe_list); 557 558 end: 559 mutex_unlock(&probe_lock); 560 return 
ret; 561 } 562 563 /* Module notifier call back, checking event on the module */ 564 static int trace_kprobe_module_callback(struct notifier_block *nb, 565 unsigned long val, void *data) 566 { 567 struct module *mod = data; 568 struct trace_kprobe *tk; 569 int ret; 570 571 if (val != MODULE_STATE_COMING) 572 return NOTIFY_DONE; 573 574 /* Update probes on coming module */ 575 mutex_lock(&probe_lock); 576 list_for_each_entry(tk, &probe_list, list) { 577 if (trace_kprobe_within_module(tk, mod)) { 578 /* Don't need to check busy - this should have gone. */ 579 __unregister_trace_kprobe(tk); 580 ret = __register_trace_kprobe(tk); 581 if (ret) 582 pr_warning("Failed to re-register probe %s on" 583 "%s: %d\n", 584 tk->tp.call.name, mod->name, ret); 585 } 586 } 587 mutex_unlock(&probe_lock); 588 589 return NOTIFY_DONE; 590 } 591 592 static struct notifier_block trace_kprobe_module_nb = { 593 .notifier_call = trace_kprobe_module_callback, 594 .priority = 1 /* Invoked after kprobe module callback */ 595 }; 596 597 static int create_trace_kprobe(int argc, char **argv) 598 { 599 /* 600 * Argument syntax: 601 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS] 602 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS] 603 * Fetch args: 604 * $retval : fetch return value 605 * $stack : fetch stack address 606 * $stackN : fetch Nth of stack (N:0-) 607 * @ADDR : fetch memory at ADDR (ADDR should be in kernel) 608 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) 609 * %REG : fetch register REG 610 * Dereferencing memory fetch: 611 * +|-offs(ARG) : fetch memory at ARG +|- offs address. 612 * Alias name of args: 613 * NAME=FETCHARG : set NAME as alias of FETCHARG. 614 * Type of args: 615 * FETCHARG:TYPE : use TYPE instead of unsigned long. 
616 */ 617 struct trace_kprobe *tk; 618 int i, ret = 0; 619 bool is_return = false, is_delete = false; 620 char *symbol = NULL, *event = NULL, *group = NULL; 621 char *arg; 622 unsigned long offset = 0; 623 void *addr = NULL; 624 char buf[MAX_EVENT_NAME_LEN]; 625 626 /* argc must be >= 1 */ 627 if (argv[0][0] == 'p') 628 is_return = false; 629 else if (argv[0][0] == 'r') 630 is_return = true; 631 else if (argv[0][0] == '-') 632 is_delete = true; 633 else { 634 pr_info("Probe definition must be started with 'p', 'r' or" 635 " '-'.\n"); 636 return -EINVAL; 637 } 638 639 if (argv[0][1] == ':') { 640 event = &argv[0][2]; 641 if (strchr(event, '/')) { 642 group = event; 643 event = strchr(group, '/') + 1; 644 event[-1] = '\0'; 645 if (strlen(group) == 0) { 646 pr_info("Group name is not specified\n"); 647 return -EINVAL; 648 } 649 } 650 if (strlen(event) == 0) { 651 pr_info("Event name is not specified\n"); 652 return -EINVAL; 653 } 654 } 655 if (!group) 656 group = KPROBE_EVENT_SYSTEM; 657 658 if (is_delete) { 659 if (!event) { 660 pr_info("Delete command needs an event name.\n"); 661 return -EINVAL; 662 } 663 mutex_lock(&probe_lock); 664 tk = find_trace_kprobe(event, group); 665 if (!tk) { 666 mutex_unlock(&probe_lock); 667 pr_info("Event %s/%s doesn't exist.\n", group, event); 668 return -ENOENT; 669 } 670 /* delete an event */ 671 ret = unregister_trace_kprobe(tk); 672 if (ret == 0) 673 free_trace_kprobe(tk); 674 mutex_unlock(&probe_lock); 675 return ret; 676 } 677 678 if (argc < 2) { 679 pr_info("Probe point is not specified.\n"); 680 return -EINVAL; 681 } 682 if (isdigit(argv[1][0])) { 683 if (is_return) { 684 pr_info("Return probe point must be a symbol.\n"); 685 return -EINVAL; 686 } 687 /* an address specified */ 688 ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr); 689 if (ret) { 690 pr_info("Failed to parse address.\n"); 691 return ret; 692 } 693 } else { 694 /* a symbol specified */ 695 symbol = argv[1]; 696 /* TODO: support .init module functions */ 
697 ret = traceprobe_split_symbol_offset(symbol, &offset); 698 if (ret) { 699 pr_info("Failed to parse symbol.\n"); 700 return ret; 701 } 702 if (offset && is_return) { 703 pr_info("Return probe must be used without offset.\n"); 704 return -EINVAL; 705 } 706 } 707 argc -= 2; argv += 2; 708 709 /* setup a probe */ 710 if (!event) { 711 /* Make a new event name */ 712 if (symbol) 713 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld", 714 is_return ? 'r' : 'p', symbol, offset); 715 else 716 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", 717 is_return ? 'r' : 'p', addr); 718 event = buf; 719 } 720 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc, 721 is_return); 722 if (IS_ERR(tk)) { 723 pr_info("Failed to allocate trace_probe.(%d)\n", 724 (int)PTR_ERR(tk)); 725 return PTR_ERR(tk); 726 } 727 728 /* parse arguments */ 729 ret = 0; 730 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 731 struct probe_arg *parg = &tk->tp.args[i]; 732 733 /* Increment count for freeing args in error case */ 734 tk->tp.nr_args++; 735 736 /* Parse argument name */ 737 arg = strchr(argv[i], '='); 738 if (arg) { 739 *arg++ = '\0'; 740 parg->name = kstrdup(argv[i], GFP_KERNEL); 741 } else { 742 arg = argv[i]; 743 /* If argument name is omitted, set "argN" */ 744 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); 745 parg->name = kstrdup(buf, GFP_KERNEL); 746 } 747 748 if (!parg->name) { 749 pr_info("Failed to allocate argument[%d] name.\n", i); 750 ret = -ENOMEM; 751 goto error; 752 } 753 754 if (!is_good_name(parg->name)) { 755 pr_info("Invalid argument[%d] name: %s\n", 756 i, parg->name); 757 ret = -EINVAL; 758 goto error; 759 } 760 761 if (traceprobe_conflict_field_name(parg->name, 762 tk->tp.args, i)) { 763 pr_info("Argument[%d] name '%s' conflicts with " 764 "another field.\n", i, argv[i]); 765 ret = -EINVAL; 766 goto error; 767 } 768 769 /* Parse fetch argument */ 770 ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, 771 is_return, true); 772 if (ret) { 773 
pr_info("Parse error at argument[%d]. (%d)\n", i, ret); 774 goto error; 775 } 776 } 777 778 ret = register_trace_kprobe(tk); 779 if (ret) 780 goto error; 781 return 0; 782 783 error: 784 free_trace_kprobe(tk); 785 return ret; 786 } 787 788 static int release_all_trace_kprobes(void) 789 { 790 struct trace_kprobe *tk; 791 int ret = 0; 792 793 mutex_lock(&probe_lock); 794 /* Ensure no probe is in use. */ 795 list_for_each_entry(tk, &probe_list, list) 796 if (trace_probe_is_enabled(&tk->tp)) { 797 ret = -EBUSY; 798 goto end; 799 } 800 /* TODO: Use batch unregistration */ 801 while (!list_empty(&probe_list)) { 802 tk = list_entry(probe_list.next, struct trace_kprobe, list); 803 ret = unregister_trace_kprobe(tk); 804 if (ret) 805 goto end; 806 free_trace_kprobe(tk); 807 } 808 809 end: 810 mutex_unlock(&probe_lock); 811 812 return ret; 813 } 814 815 /* Probes listing interfaces */ 816 static void *probes_seq_start(struct seq_file *m, loff_t *pos) 817 { 818 mutex_lock(&probe_lock); 819 return seq_list_start(&probe_list, *pos); 820 } 821 822 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) 823 { 824 return seq_list_next(v, &probe_list, pos); 825 } 826 827 static void probes_seq_stop(struct seq_file *m, void *v) 828 { 829 mutex_unlock(&probe_lock); 830 } 831 832 static int probes_seq_show(struct seq_file *m, void *v) 833 { 834 struct trace_kprobe *tk = v; 835 int i; 836 837 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 
'r' : 'p'); 838 seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); 839 840 if (!tk->symbol) 841 seq_printf(m, " 0x%p", tk->rp.kp.addr); 842 else if (tk->rp.kp.offset) 843 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk), 844 tk->rp.kp.offset); 845 else 846 seq_printf(m, " %s", trace_kprobe_symbol(tk)); 847 848 for (i = 0; i < tk->tp.nr_args; i++) 849 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); 850 seq_printf(m, "\n"); 851 852 return 0; 853 } 854 855 static const struct seq_operations probes_seq_op = { 856 .start = probes_seq_start, 857 .next = probes_seq_next, 858 .stop = probes_seq_stop, 859 .show = probes_seq_show 860 }; 861 862 static int probes_open(struct inode *inode, struct file *file) 863 { 864 int ret; 865 866 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 867 ret = release_all_trace_kprobes(); 868 if (ret < 0) 869 return ret; 870 } 871 872 return seq_open(file, &probes_seq_op); 873 } 874 875 static ssize_t probes_write(struct file *file, const char __user *buffer, 876 size_t count, loff_t *ppos) 877 { 878 return traceprobe_probes_write(file, buffer, count, ppos, 879 create_trace_kprobe); 880 } 881 882 static const struct file_operations kprobe_events_ops = { 883 .owner = THIS_MODULE, 884 .open = probes_open, 885 .read = seq_read, 886 .llseek = seq_lseek, 887 .release = seq_release, 888 .write = probes_write, 889 }; 890 891 /* Probes profiling interfaces */ 892 static int probes_profile_seq_show(struct seq_file *m, void *v) 893 { 894 struct trace_kprobe *tk = v; 895 896 seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, 897 tk->rp.kp.nmissed); 898 899 return 0; 900 } 901 902 static const struct seq_operations profile_seq_op = { 903 .start = probes_seq_start, 904 .next = probes_seq_next, 905 .stop = probes_seq_stop, 906 .show = probes_profile_seq_show 907 }; 908 909 static int profile_open(struct inode *inode, struct file *file) 910 { 911 return seq_open(file, &profile_seq_op); 912 } 
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
/* Record one kprobe hit into the ring buffer of a single trace instance */
static __kprobes void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Fixed-size args plus dynamic (string) data sized from regs */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

/* Fan out one kprobe hit to every linked trace instance (RCU list walk) */
static __kprobes void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}

/* Kretprobe handler */
/* Record one function-return hit into the ring buffer of one instance */
static __kprobes void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

/* Fan out one return-probe hit to every linked trace instance */
static __kprobes void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}

/* Event entry printers */
/* Render a kprobe trace entry as "EVENT: (ip) arg1=... arg2=..." */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

/* Render a kretprobe trace entry as "EVENT: (ret_ip <- func) args..." */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


/* Declare the 'ip' field plus each probe argument as event fields */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/* Declare 'func'/'ret_ip' fields plus each probe argument as event fields */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* perf buffers want u32-prefixed records rounded to u64 alignment */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
static __kprobes void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

/* Entry point installed as the kprobe pre_handler; routes to trace/perf */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	tk->nhit++;

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}

/* Entry point installed as the kretprobe handler; routes to trace/perf */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	tk->nhit++;

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

/* Register this probe as an ftrace event (printer, fields, reg callback) */
static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct ftrace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	/* register_ftrace_event() returns the event type id; 0 means failure */
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}

/* NOTE(review): definition continues past the end of this chunk */
static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;
/* tp->event is unregistered in trace_remove_event_call() */ 1318 ret = trace_remove_event_call(&tk->tp.call); 1319 if (!ret) 1320 kfree(tk->tp.call.print_fmt); 1321 return ret; 1322 } 1323 1324 /* Make a debugfs interface for controlling probe points */ 1325 static __init int init_kprobe_trace(void) 1326 { 1327 struct dentry *d_tracer; 1328 struct dentry *entry; 1329 1330 if (register_module_notifier(&trace_kprobe_module_nb)) 1331 return -EINVAL; 1332 1333 d_tracer = tracing_init_dentry(); 1334 if (!d_tracer) 1335 return 0; 1336 1337 entry = debugfs_create_file("kprobe_events", 0644, d_tracer, 1338 NULL, &kprobe_events_ops); 1339 1340 /* Event list interface */ 1341 if (!entry) 1342 pr_warning("Could not create debugfs " 1343 "'kprobe_events' entry\n"); 1344 1345 /* Profile interface */ 1346 entry = debugfs_create_file("kprobe_profile", 0444, d_tracer, 1347 NULL, &kprobe_profile_ops); 1348 1349 if (!entry) 1350 pr_warning("Could not create debugfs " 1351 "'kprobe_profile' entry\n"); 1352 return 0; 1353 } 1354 fs_initcall(init_kprobe_trace); 1355 1356 1357 #ifdef CONFIG_FTRACE_STARTUP_TEST 1358 1359 /* 1360 * The "__used" keeps gcc from removing the function symbol 1361 * from the kallsyms table. 1362 */ 1363 static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, 1364 int a4, int a5, int a6) 1365 { 1366 return a1 + a2 + a3 + a4 + a5 + a6; 1367 } 1368 1369 static struct ftrace_event_file * 1370 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) 1371 { 1372 struct ftrace_event_file *file; 1373 1374 list_for_each_entry(file, &tr->events, list) 1375 if (file->event_call == &tk->tp.call) 1376 return file; 1377 1378 return NULL; 1379 } 1380 1381 /* 1382 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this 1383 * stage, we can do this lockless. 
1384 */ 1385 static __init int kprobe_trace_self_tests_init(void) 1386 { 1387 int ret, warn = 0; 1388 int (*target)(int, int, int, int, int, int); 1389 struct trace_kprobe *tk; 1390 struct ftrace_event_file *file; 1391 1392 target = kprobe_trace_selftest_target; 1393 1394 pr_info("Testing kprobe tracing: "); 1395 1396 ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target " 1397 "$stack $stack0 +0($stack)", 1398 create_trace_kprobe); 1399 if (WARN_ON_ONCE(ret)) { 1400 pr_warn("error on probing function entry.\n"); 1401 warn++; 1402 } else { 1403 /* Enable trace point */ 1404 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); 1405 if (WARN_ON_ONCE(tk == NULL)) { 1406 pr_warn("error on getting new probe.\n"); 1407 warn++; 1408 } else { 1409 file = find_trace_probe_file(tk, top_trace_array()); 1410 if (WARN_ON_ONCE(file == NULL)) { 1411 pr_warn("error on getting probe file.\n"); 1412 warn++; 1413 } else 1414 enable_trace_kprobe(tk, file); 1415 } 1416 } 1417 1418 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " 1419 "$retval", create_trace_kprobe); 1420 if (WARN_ON_ONCE(ret)) { 1421 pr_warn("error on probing function return.\n"); 1422 warn++; 1423 } else { 1424 /* Enable trace point */ 1425 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); 1426 if (WARN_ON_ONCE(tk == NULL)) { 1427 pr_warn("error on getting 2nd new probe.\n"); 1428 warn++; 1429 } else { 1430 file = find_trace_probe_file(tk, top_trace_array()); 1431 if (WARN_ON_ONCE(file == NULL)) { 1432 pr_warn("error on getting probe file.\n"); 1433 warn++; 1434 } else 1435 enable_trace_kprobe(tk, file); 1436 } 1437 } 1438 1439 if (warn) 1440 goto end; 1441 1442 ret = target(1, 2, 3, 4, 5, 6); 1443 1444 /* Disable trace points before removing it */ 1445 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); 1446 if (WARN_ON_ONCE(tk == NULL)) { 1447 pr_warn("error on getting test probe.\n"); 1448 warn++; 1449 } else { 1450 file = find_trace_probe_file(tk, 
top_trace_array()); 1451 if (WARN_ON_ONCE(file == NULL)) { 1452 pr_warn("error on getting probe file.\n"); 1453 warn++; 1454 } else 1455 disable_trace_kprobe(tk, file); 1456 } 1457 1458 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); 1459 if (WARN_ON_ONCE(tk == NULL)) { 1460 pr_warn("error on getting 2nd test probe.\n"); 1461 warn++; 1462 } else { 1463 file = find_trace_probe_file(tk, top_trace_array()); 1464 if (WARN_ON_ONCE(file == NULL)) { 1465 pr_warn("error on getting probe file.\n"); 1466 warn++; 1467 } else 1468 disable_trace_kprobe(tk, file); 1469 } 1470 1471 ret = traceprobe_command("-:testprobe", create_trace_kprobe); 1472 if (WARN_ON_ONCE(ret)) { 1473 pr_warn("error on deleting a probe.\n"); 1474 warn++; 1475 } 1476 1477 ret = traceprobe_command("-:testprobe2", create_trace_kprobe); 1478 if (WARN_ON_ONCE(ret)) { 1479 pr_warn("error on deleting a probe.\n"); 1480 warn++; 1481 } 1482 1483 end: 1484 release_all_trace_kprobes(); 1485 if (warn) 1486 pr_cont("NG: Some tests are failed. Please check them.\n"); 1487 else 1488 pr_cont("OK\n"); 1489 return 0; 1490 } 1491 1492 late_initcall(kprobe_trace_self_tests_init); 1493 1494 #endif 1495