/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						       struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);

	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}
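/*
 * Sum up the per-cpu hit counts for this probe. The counters are bumped
 * with raw_cpu_inc() from the probe dispatchers below, without locking,
 * so the total computed here is only a best-effort snapshot while the
 * probe is firing.
 */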
static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return within_error_injection_list(addr);
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
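/*
 * Note on the "data location" u32 used by the string fetchers below: its
 * upper 16 bits hold the (maximum, then actual) string length and its
 * lower 16 bits the offset of the string data from the entry header (see
 * make_data_rloc()/get_rloc_len() in trace_probe.h), mirroring the
 * __data_loc convention of ordinary trace events.
 */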
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get the string again, since the string can be changed
	 * while probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the final null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
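/*
 * In the table above, the middle macro argument selects the fetch routine
 * (signed types reuse the unsigned fetcher of the same width) and the
 * last argument marks the value as signed for printing and filtering;
 * the x8..x64 entries are hexadecimal-printing aliases.
 */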
/*
 * Allocate a new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					       const char *event,
					       void *addr,
					       const char *symbol,
					       unsigned long offs,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe:
 * if file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}
/*
 * Disable trace_probe:
 * if file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * so that all currently running handlers have finished.
		 * This matters not only for the kfree() below, but also for
		 * the caller: trace_remove_event_call() relies on it before
		 * releasing event_call related objects, which those handlers
		 * still access.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}
/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear the disabled flag according to tp->flags */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Clean up the kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/*
 * Unregister a trace_probe and probe_event: must be called with
 * probe_lock held.
 */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if the probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete an old (same name) event if it exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register the new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register the k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}
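/*
 * Note that registering a probe on a symbol inside a not-yet-loaded
 * module fails with -ENOENT; __register_trace_kprobe() above deliberately
 * keeps such a probe around (returning success) so that the module
 * notifier below can register it once the module actually appears.
 */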
/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on the coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/*
			 * No need to check busy here - the old probe should
			 * have been marked gone when the module went away.
			 */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}
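/*
 * Example usage via tracefs (illustrative only -- register names are
 * arch-specific and do_sys_open is just a sample target):
 *
 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' >> kprobe_events
 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
 *   echo '-:myprobe' >> kprobe_events
 */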
static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/*
	 * Try to parse the probe point as an address; if that fails, treat
	 * the input as a symbol.
	 */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol was specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* set up the probe */
	if (!event) {
		/* Make up a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse the fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}
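/*
 * Remove all probes at once. This is what makes a truncating write
 * ("echo ... > kprobe_events", see the O_TRUNC check in probes_open()
 * below) clear the whole list, while ">>" only appends new definitions.
 */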
static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
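/*
 * probes_seq_show() above lists each probe in roughly the form it was
 * created in, e.g. (hypothetical): p:kprobes/myprobe do_sys_open dfd=%ax
 */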
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
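/*
 * Each kprobe_profile line shows the event name, the hit count (summed
 * over all CPUs by trace_kprobe_nhit()) and the number of missed
 * executions reported by the kprobe core in rp.kp.nmissed.
 */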
/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
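/*
 * In both define_fields callbacks below, each probe argument is
 * registered at sizeof(field) + parg->offset, i.e. right behind the
 * fixed entry header -- the same layout store_trace_args() writes into
 * the ring buffer, so field definitions and recorded data stay in sync.
 */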
static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
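/*
 * Note on the buffer sizing in the two perf handlers below: perf records
 * raw sample data prefixed by a u32 size, and the total must stay
 * u64-aligned, hence the idiom
 *
 *	size = ALIGN(__size + sizeof(u32), sizeof(u64)) - sizeof(u32);
 */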
/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so clear the kprobe and return 1 so that
		 * we don't do the single stepping.
		 * The ftrace kprobe handler leaves it up to us to re-enable
		 * preemption here before returning if we've modified the ip.
		 */
		if (orig_ip != instruction_pointer(regs)) {
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
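/*
 * A kprobe pre_handler may return nonzero to tell the kprobe core to
 * skip the single-step. kprobe_dispatcher() therefore propagates the
 * return value of kprobe_perf_func(), which returns 1 when an attached
 * BPF program changed regs->ip (function error injection). Kretprobe
 * handlers have no such side channel and always return 0.
 */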
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
				"$stack $stack0 +0($stack)",
				create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
				"$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);
	/*
	 * Not expecting an error here; the check only prevents the
	 * optimizer from removing the call to target(), as otherwise
	 * there are no side effects and the call would never be performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = trace_run_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = trace_run_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif