/* kernel/trace/trace_syscalls.c (openbmc/linux, revision 97da55fc) */
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

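/*
 * Bookkeeping for syscall tracing: syscall_trace_lock serializes
 * (un)registration, the refcounts track how many events currently use
 * the shared sys_enter/sys_exit tracepoints, and the bitmaps record
 * which individual syscalls have tracing enabled.
 */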
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

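/*
 * The SYSCALL_METADATA() machinery in <linux/syscalls.h> places a
 * pointer to each syscall's metadata into a dedicated section; the
 * linker provides these start/stop symbols so that section can be
 * scanned as an array.
 */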
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare past the "sys" prefix.  Archs that use syscall
	 * wrappers may have syscall symbol aliases prefixed with "SyS"
	 * instead of "sys", which would otherwise cause an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32-bit applications to run on a
 * 64-bit kernel do not map the syscalls for 32-bit tasks the same
 * way they do for 64-bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls, simply
 * ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to define
 * ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as the function
 * arch_trace_is_compat_syscall(), which tells the tracing system
 * which syscalls to ignore.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

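/*
 * Resolve a syscall's metadata from its handler address: look up the
 * symbol name via kallsyms, then linearly scan the metadata section
 * for a matching entry.  sys_ni_syscall (the "not implemented" stub)
 * is skipped, since many syscall table slots alias to it.
 */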
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

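/*
 * Output callback for sys_enter events: render the record as
 * "name(arg: value, ...)", optionally prefixing each argument with
 * its type when the verbose trace flag is set.
 */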
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

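/*
 * __bad_type_size() is declared but intentionally never defined: if
 * SYSCALL_FIELD() is used with mismatched sizes, the call is not
 * optimized away and the build fails at link time.  Otherwise the
 * macro expands to the type/name/offset/size/sign argument list that
 * trace_define_field() expects.
 */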
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

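/*
 * Build the print_fmt string for an enter event.  Called twice: first
 * with a NULL buffer to compute the required length, then with the
 * real buffer to fill it in (see set_syscall_print_fmt() below).
 */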
static int
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

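/*
 * Describe the record layout (field names, offsets, sizes) so the
 * generic event code can expose the "format" file and apply filters.
 * Every syscall argument is stored as an unsigned long.
 */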
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

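/*
 * Probes attached to the raw sys_enter/sys_exit tracepoints for the
 * ftrace path: reserve a ring buffer event sized for this syscall's
 * arguments, fill it from pt_regs, and commit it unless the event
 * filter discards it.
 */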
static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

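/*
 * Enable/disable helpers: the first enabled event registers the
 * shared tracepoint probe and the last disabled one unregisters it;
 * the per-syscall bitmaps gate the fast path in the probes above.
 */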
static int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

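/*
 * Weak default: fetch the handler address straight from
 * sys_call_table.  Architectures with a different table layout can
 * override this.
 */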
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

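/*
 * Build the syscall-number -> metadata mapping at early boot by
 * walking the syscall table and matching each handler against the
 * compiled-in metadata.  Slots without metadata stay NULL and are
 * simply not traceable.
 */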
static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

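/*
 * perf counterpart of the ftrace probes: records go into the perf
 * ring buffer via perf_trace_buf_prepare()/submit().  The size is
 * padded so that, together with the u32 size field that precedes the
 * record, the whole entry stays u64 aligned.
 */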
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but stay paranoid about future growth:
	 * ideally this check would happen at build time rather than
	 * at runtime.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		"exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

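/*
 * ->reg() callbacks multiplexing the generic event registration
 * requests onto the ftrace or perf enable/disable helpers above.
 */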
static int syscall_enter_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
736