// SPDX-License-Identifier: GPL-2.0
/*
 * Syscall entry/exit trace events ("syscalls" event system).
 *
 * Builds a per-syscall-number metadata table at boot (from the
 * __start/__stop_syscalls_metadata section arrays), and wires each
 * sys_enter_*/sys_exit_* trace event to the generic sys_enter/sys_exit
 * tracepoints — for both ftrace ring-buffer output and perf/BPF.
 */
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

/* Serializes all register/unregister refcount updates below. */
static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);

/* Return the field list of an enter event (call->data is its metadata). */
static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

/* Linker-provided bounds of the syscalls_metadata section array. */
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

/* Indexed by syscall number; NULL for syscalls with no metadata. */
static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel, do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	/* Compat syscalls are deliberately reported as "no syscall". */
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

/*
 * Map a syscall entry address to its metadata by resolving the address
 * to a symbol name and scanning the section array for a name match.
 * Returns NULL for unimplemented syscalls (sys_ni_syscall) or when no
 * metadata entry matches.
 */
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];


	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

/* Bounds-checked lookup into the table built by init_ftrace_syscalls(). */
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

/* Return the syscall's symbolic name, or NULL if it has no metadata. */
const char *get_syscall_name(int syscall)
{
	struct syscall_metadata *entry;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		return NULL;

	return entry->name;
}

/*
 * Output one sys_enter event as "name(arg: val, ...)".  With
 * TRACE_ITER_VERBOSE the C type of each argument is printed too.
 * Events whose type doesn't match the metadata's enter event are
 * dropped with a one-time warning.
 */
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

/* Output one sys_exit event as "name -> 0x<ret>". */
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

 out:
	return trace_handle_return(s);
}

/* Intentionally undefined: referencing it turns a size mismatch into a link error. */
extern char *__bad_type_size(void);

/*
 * Expand to the (type-name, field-name, offset, size, signedness)
 * argument tuple expected by trace_define_field(); fails at link time
 * if @type and the struct field disagree in size.
 */
#define SYSCALL_FIELD(type, field, name)				\
	sizeof(type) != sizeof(trace.field) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), field),		\
		sizeof(trace.field), is_signed_type(type)

/*
 * Build the print_fmt string for an enter event into @buf.
 * Called once with len=0 to size the buffer, then again to fill it.
 * Returns the length the full string needs (excluding the NUL).
 */
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/*
 * Set @call->print_fmt.  Exit events share one static string; enter
 * events get a kmalloc'd per-syscall string (freed by
 * free_syscall_print_fmt() on raw-init failure).
 */
static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

/* Only enter events own their print_fmt allocation (see above). */
static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

/*
 * Register the __syscall_nr field plus one unsigned-long field per
 * syscall argument for filtering on an enter event.
 */
static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		/* Args are stored back-to-back as unsigned longs in the record. */
		offset += sizeof(unsigned long);
	}

	return ret;
}

/* Register the __syscall_nr and ret fields for filtering on an exit event. */
static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
				 FILTER_OTHER);

	return ret;
}

/*
 * sys_enter tracepoint probe: record syscall number and arguments into
 * @tr's ring buffer, honoring per-file soft-disable and triggers.
 */
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* Record is the fixed header plus one unsigned long per argument. */
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

/*
 * sys_exit tracepoint probe: record syscall number and return value
 * into @tr's ring buffer, honoring per-file soft-disable and triggers.
 */
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

/*
 * Enable an enter event for @file's trace_array: register the shared
 * sys_enter probe on first use, then publish @file for this syscall
 * number (readers dereference it under rcu_read_lock_sched).
 */
static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

/*
 * Disable an enter event: unpublish the file pointer and drop the
 * probe once the last enter event for this trace_array goes away.
 */
static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

/* Exit-event counterpart of reg_event_syscall_enter(). */
static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

/* Exit-event counterpart of unreg_event_syscall_enter(). */
static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

/*
 * raw_init callback for both event classes: refuse syscalls that got
 * no metadata mapping, then build print_fmt and do generic raw init.
 */
static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

/* Default address lookup; archs with indirect tables override this. */
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

/*
 * Build the syscall-number -> metadata table.  Syscalls without
 * metadata (or whose symbol can't be matched) are simply left NULL.
 */
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

/* Per-syscall enable bits and global probe refcounts for perf. */
static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

/*
 * Hand the enter record to an attached BPF program.  The local
 * syscall_tp_t mirrors the layout BPF programs expect; the first
 * member is overwritten with the pt_regs pointer via the cast below.
 * NOTE(review): the *(struct pt_regs **)&param store assumes
 * sizeof(unsigned long long) >= sizeof(void *) — confirm for 32bit.
 */
static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
			       struct syscall_metadata *sys_data,
			       struct syscall_trace_enter *rec)
{
	struct syscall_tp_t {
		unsigned long long regs;
		unsigned long syscall_nr;
		unsigned long args[SYSCALL_DEFINE_MAXARGS];
	} param;
	int i;

	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	for (i = 0; i < sys_data->nb_args; i++)
		param.args[i] = rec->args[i];
	return trace_call_bpf(call, &param);
}

/*
 * sys_enter tracepoint probe for perf: build a record in the perf
 * trace buffer, run any attached BPF program, then submit to the
 * perf event heads unless BPF filtered it out.
 */
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);

	if ((valid_prog_array &&
	     !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
	    hlist_empty(head)) {
		/* BPF said drop (or nobody listening): release the buffer slot. */
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL);
}

/* Enable perf tracing of one syscall's entry; probe registered on first use. */
static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

/* Disable perf tracing of one syscall's entry; drop probe on last user. */
static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

/* Exit-side counterpart of perf_call_bpf_enter() (same layout trick). */
static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
			      struct syscall_trace_exit *rec)
{
	struct syscall_tp_t {
		unsigned long long regs;
		unsigned long syscall_nr;
		unsigned long ret;
	} param;

	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	param.ret = rec->ret;
	return trace_call_bpf(call, &param);
}

/* sys_exit tracepoint probe for perf; mirrors perf_syscall_enter(). */
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	if ((valid_prog_array &&
	     !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
	    hlist_empty(head)) {
		/* BPF said drop (or nobody listening): release the buffer slot. */
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
			      1, regs, head, NULL);
}

/* Enable perf tracing of one syscall's exit; probe registered on first use. */
static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

/* Disable perf tracing of one syscall's exit; drop probe on last user. */
static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

/* .reg callback for the enter event class: dispatch by registration type. */
static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

/* .reg callback for the exit event class: dispatch by registration type. */
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}