#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
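/*
 * For illustration, an architecture opting in would provide something
 * like the following in its asm headers (a sketch, not any particular
 * arch's actual code; in_compat_syscall() is the generic helper such an
 * implementation could lean on):
 *
 *	#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
 *
 *	static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 *	{
 *		return in_compat_syscall();
 *	}
 */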
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

const char *get_syscall_name(int syscall)
{
	struct syscall_metadata *entry;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		return NULL;

	return entry->name;
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

 out:
	return trace_handle_return(s);
}
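/*
 * Example rendering (illustrative values): with TRACE_ITER_VERBOSE off,
 * an enter/exit pair produced by the two printers above would look like:
 *
 *	sys_read(fd: 3, buf: 7ffc8e4f2000, count: 1000)
 *	sys_read -> 0x400
 */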
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, field, name)				\
	sizeof(type) != sizeof(trace.field) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), field),		\
		sizeof(trace.field), is_signed_type(type)

static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
				 FILTER_OTHER);

	return ret;
}
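/*
 * For a 3-argument syscall such as read(2), __set_enter_print_fmt() above
 * builds (illustrative, on an arch where sizeof(unsigned long) == 8):
 *
 *	"fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->buf)),
 *	((unsigned long)(REC->count))
 */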
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}
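/*
 * Note the refcounting pattern above (mirrored for the exit path below):
 * a single probe, ftrace_syscall_enter(), is attached to the sys_enter
 * tracepoint only when the first syscall event in a trace array is
 * enabled; per-syscall dispatch then goes through
 * tr->enter_syscall_files[]. Enabling N syscall events therefore costs
 * one tracepoint registration, not N.
 */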
static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static int perf_call_bpf_enter(struct bpf_prog *prog, struct pt_regs *regs,
			       struct syscall_metadata *sys_data,
			       struct syscall_trace_enter *rec)
{
	struct syscall_tp_t {
		unsigned long long regs;
		unsigned long syscall_nr;
		unsigned long args[SYSCALL_DEFINE_MAXARGS];
	} param;
	int i;

	/* stash the pt_regs pointer in the first field, which the BPF
	 * program sees as a u64 "regs" member */
	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	for (i = 0; i < sys_data->nb_args; i++)
		param.args[i] = rec->args[i];
	return trace_call_bpf(prog, &param);
}
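/*
 * A worked example of the size computation in perf_syscall_enter() below
 * (illustrative, assuming a 64-bit arch where sizeof(*rec) == 16: an
 * 8-byte struct trace_entry, the 4-byte nr, and padding up to the
 * 8-byte-aligned args[]): for a 6-argument syscall,
 * size = 16 + 6 * 8 = 64, then ALIGN(64 + 4, 8) - 4 = 68. The round trip
 * through sizeof(u32) accounts for the u32 size field that perf prepends,
 * keeping the record u64-aligned in the perf buffer.
 */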
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	struct bpf_prog *prog;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	prog = READ_ONCE(sys_data->enter_event->prog);
	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (!prog && hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	if ((prog && !perf_call_bpf_enter(prog, regs, sys_data, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int perf_call_bpf_exit(struct bpf_prog *prog, struct pt_regs *regs,
			      struct syscall_trace_exit *rec)
{
	struct syscall_tp_t {
		unsigned long long regs;
		unsigned long syscall_nr;
		unsigned long ret;
	} param;

	/* as in perf_call_bpf_enter(): the pt_regs pointer rides in the
	 * first field */
	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	param.ret = rec->ret;
	return trace_call_bpf(prog, &param);
}
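/*
 * The early-bail logic in both perf handlers reads: if an attached BPF
 * program filters the event out (trace_call_bpf() returning 0), or no
 * perf event is listening on this CPU, the reserved buffer is discarded
 * via perf_swevent_put_recursion_context() instead of being submitted.
 */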
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	struct bpf_prog *prog;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	prog = READ_ONCE(sys_data->exit_event->prog);
	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (!prog && hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	if ((prog && !perf_call_bpf_exit(prog, regs, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
			      1, regs, head, NULL, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
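/*
 * Usage sketch (illustrative paths): the events registered above appear
 * under the "syscalls" system in tracefs on kernels built with
 * CONFIG_FTRACE_SYSCALLS, e.g.:
 *
 *	# echo 1 > /sys/kernel/tracing/events/syscalls/sys_enter_read/enable
 *	# cat /sys/kernel/tracing/trace
 */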