#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore such syscalls.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
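/*
 * Illustrative sketch (not part of this file): an architecture opting in
 * would provide something like the following in its ftrace header. This
 * is loosely modeled on x86; the predicate used to detect a compat task
 * is arch-specific, so is_compat_task() here is an assumption.
 *
 *	#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
 *
 *	static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 *	{
 *		return is_compat_task();
 *	}
 */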
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
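/*
 * Worked example: SYSCALL_FIELD(int, nr) expands to
 *
 *	sizeof(int) != sizeof(trace.nr) ? __bad_type_size() :
 *		"int", "nr", offsetof(typeof(trace), nr),
 *		sizeof(trace.nr), is_signed_type(int)
 *
 * i.e. the five middle arguments of trace_define_field(). If the declared
 * type's size ever disagrees with the struct member's size, the ternary
 * selects __bad_type_size(), which is declared above but defined nowhere,
 * so the mistake is caught as a link-time error.
 */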
static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}
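/*
 * Worked example (hypothetical two-argument syscall with args "fd" and
 * "buf", on a 64-bit arch): the two-pass scheme above first calls
 * __set_enter_print_fmt() with buf == NULL and len == 0, summing up the
 * would-be output lengths returned by snprintf(), then allocates and
 * fills the buffer. The resulting print_fmt is:
 *
 *	"fd: 0x%08lx, buf: 0x%08lx", ((unsigned long)(REC->fd)),
 *		((unsigned long)(REC->buf))
 */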
static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

static int reg_event_syscall_enter(struct ftrace_event_file *file,
				   struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		set_bit(num, tr->enabled_enter_syscalls);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_file *file,
				      struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	clear_bit(num, tr->enabled_enter_syscalls);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_file *file,
				  struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		set_bit(num, tr->enabled_exit_syscalls);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_file *file,
				     struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	clear_bit(num, tr->enabled_exit_syscalls);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}
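/*
 * Note the pattern shared by the four reg/unreg helpers above: a single
 * probe (ftrace_syscall_enter/exit) is attached to the raw sys_enter or
 * sys_exit tracepoint only when the first syscall event of a trace
 * instance is enabled, and detached when the last one goes away; the
 * per-syscall bitmaps do the fine-grained filtering in between. For
 * example, on one instance:
 *
 *	enable  sys_enter_read  : refcount 0->1, probe registered, bit set
 *	enable  sys_enter_write : refcount 1->2, bit set
 *	disable sys_enter_read  : refcount 2->1, bit cleared
 *	disable sys_enter_write : refcount 1->0, probe unregistered
 */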
static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
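/*
 * Worked example of the size computation above, assuming a 32-bit arch,
 * a 3-argument syscall and sizeof(*rec) == 12 (8-byte trace_entry header
 * plus 'int nr'):
 *
 *	size = 3 * 4 + 12             = 24
 *	size = ALIGN(24 + 4, 8) - 4   = 32 - 4 = 28
 *
 * The u32 temporarily added is the buffer-size field that perf prepends
 * to the record, so the ALIGN keeps the overall record 8-byte aligned;
 * here it appends 4 bytes of padding.
 */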
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but be paranoid about future growth.
	 * Ideally this check would happen at build time.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
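/*
 * Usage note (illustrative): each traced syscall is exposed as a pair of
 * events under the "syscalls" system in the tracing debugfs directory,
 * formatted by print_syscall_enter/exit() above, e.g.:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_read/enable
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * perf reaches the same metadata through the TRACE_REG_PERF_* cases of
 * the two dispatchers above.
 */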