1 /* SPDX-License-Identifier: GPL-2.0 */
2
3 #ifndef _LINUX_TRACE_EVENT_H
4 #define _LINUX_TRACE_EVENT_H
5
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_seq.h>
8 #include <linux/percpu.h>
9 #include <linux/hardirq.h>
10 #include <linux/perf_event.h>
11 #include <linux/tracepoint.h>
12
13 struct trace_array;
14 struct array_buffer;
15 struct tracer;
16 struct dentry;
17 struct bpf_prog;
18 union bpf_attr;
19
20 const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
21 unsigned long flags,
22 const struct trace_print_flags *flag_array);
23
24 const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
25 const struct trace_print_flags *symbol_array);
26
27 #if BITS_PER_LONG == 32
28 const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
29 unsigned long long flags,
30 const struct trace_print_flags_u64 *flag_array);
31
32 const char *trace_print_symbols_seq_u64(struct trace_seq *p,
33 unsigned long long val,
34 const struct trace_print_flags_u64
35 *symbol_array);
36 #endif
37
38 const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
39 unsigned int bitmask_size);
40
41 const char *trace_print_hex_seq(struct trace_seq *p,
42 const unsigned char *buf, int len,
43 bool concatenate);
44
45 const char *trace_print_array_seq(struct trace_seq *p,
46 const void *buf, int count,
47 size_t el_size);
48
49 const char *
50 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
51 int prefix_type, int rowsize, int groupsize,
52 const void *buf, size_t len, bool ascii);
53
54 struct trace_iterator;
55 struct trace_event;
56
57 int trace_raw_output_prep(struct trace_iterator *iter,
58 struct trace_event *event);
59 extern __printf(2, 3)
60 void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
61
62 /* Used to find the offset and length of dynamic fields in trace events */
63 struct trace_dynamic_info {
64 #ifdef CONFIG_CPU_BIG_ENDIAN
65 u16 len;
66 u16 offset;
67 #else
68 u16 offset;
69 u16 len;
70 #endif
71 } __packed;
72
73 /*
74 * The trace entry - the most basic unit of tracing. This is what
75 * is printed in the end as a single line in the trace output, such as:
76 *
77 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
78 */
79 struct trace_entry {
80 unsigned short type;
81 unsigned char flags;
82 unsigned char preempt_count;
83 int pid;
84 };
85
86 #define TRACE_EVENT_TYPE_MAX \
87 ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
88
89 /*
90 * Trace iterator - used by printout routines that present trace
91 * results to users; such routines may sleep, etc.:
92 */
93 struct trace_iterator {
94 struct trace_array *tr;
95 struct tracer *trace;
96 struct array_buffer *array_buffer;
97 void *private;
98 int cpu_file;
99 struct mutex mutex;
100 struct ring_buffer_iter **buffer_iter;
101 unsigned long iter_flags;
102 void *temp; /* temp holder */
103 unsigned int temp_size;
104 char *fmt; /* modified format holder */
105 unsigned int fmt_size;
106 long wait_index;
107
108 /* trace_seq for __print_flags() and __print_symbolic() etc. */
109 struct trace_seq tmp_seq;
110
111 cpumask_var_t started;
112
113 /* true when the currently open file is a snapshot */
114 bool snapshot;
115
116 /* The below is zeroed out in pipe_read */
117 struct trace_seq seq;
118 struct trace_entry *ent;
119 unsigned long lost_events;
120 int leftover;
121 int ent_size;
122 int cpu;
123 u64 ts;
124
125 loff_t pos;
126 long idx;
127
128 /* All new fields here will be zeroed out in pipe_read */
129 };
130
131 enum trace_iter_flags {
132 TRACE_FILE_LAT_FMT = 1,
133 TRACE_FILE_ANNOTATE = 2,
134 TRACE_FILE_TIME_IN_NS = 4,
135 };
136
137
138 typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
139 int flags, struct trace_event *event);
140
141 struct trace_event_functions {
142 trace_print_func trace;
143 trace_print_func raw;
144 trace_print_func hex;
145 trace_print_func binary;
146 };
147
148 struct trace_event {
149 struct hlist_node node;
150 int type;
151 struct trace_event_functions *funcs;
152 };
153
154 extern int register_trace_event(struct trace_event *event);
155 extern int unregister_trace_event(struct trace_event *event);
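
/*
 * Illustrative sketch (not part of this header): a subsystem that writes
 * its own binary records into the ring buffer can hook the output side by
 * registering a struct trace_event with a custom "trace" callback. All
 * names below are hypothetical.
 *
 *	static enum print_line_t my_output(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_output,
 *	};
 *
 *	static struct trace_event my_trace_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	... register_trace_event(&my_trace_event) returns the assigned event
 *	    type (0 on failure); unregister_trace_event() undoes it ...
 */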
156
157 /* Return values for print_line callback */
158 enum print_line_t {
159 TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
160 TRACE_TYPE_HANDLED = 1,
161 TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
162 TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
163 };
164
165 enum print_line_t trace_handle_return(struct trace_seq *s);
166
167 static inline void tracing_generic_entry_update(struct trace_entry *entry,
168 unsigned short type,
169 unsigned int trace_ctx)
170 {
171 entry->preempt_count = trace_ctx & 0xff;
172 entry->pid = current->pid;
173 entry->type = type;
174 entry->flags = trace_ctx >> 16;
175 }
176
177 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
178
179 enum trace_flag_type {
180 TRACE_FLAG_IRQS_OFF = 0x01,
181 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
182 TRACE_FLAG_NEED_RESCHED = 0x04,
183 TRACE_FLAG_HARDIRQ = 0x08,
184 TRACE_FLAG_SOFTIRQ = 0x10,
185 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
186 TRACE_FLAG_NMI = 0x40,
187 TRACE_FLAG_BH_OFF = 0x80,
188 };
189
190 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
191 static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
192 {
193 unsigned int irq_status = irqs_disabled_flags(irqflags) ?
194 TRACE_FLAG_IRQS_OFF : 0;
195 return tracing_gen_ctx_irq_test(irq_status);
196 }
197 static inline unsigned int tracing_gen_ctx(void)
198 {
199 unsigned long irqflags;
200
201 local_save_flags(irqflags);
202 return tracing_gen_ctx_flags(irqflags);
203 }
204 #else
205
206 static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
207 {
208 return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
209 }
210 static inline unsigned int tracing_gen_ctx(void)
211 {
212 return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
213 }
214 #endif
215
216 static inline unsigned int tracing_gen_ctx_dec(void)
217 {
218 unsigned int trace_ctx;
219
220 trace_ctx = tracing_gen_ctx();
221 /*
222 * Subtract one from the preemption counter if preemption is enabled,
223 * see trace_event_buffer_reserve() for details.
224 */
225 if (IS_ENABLED(CONFIG_PREEMPTION))
226 trace_ctx--;
227 return trace_ctx;
228 }
229
230 struct trace_event_file;
231
232 struct ring_buffer_event *
233 trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
234 struct trace_event_file *trace_file,
235 int type, unsigned long len,
236 unsigned int trace_ctx);
237
238 #define TRACE_RECORD_CMDLINE BIT(0)
239 #define TRACE_RECORD_TGID BIT(1)
240
241 void tracing_record_taskinfo(struct task_struct *task, int flags);
242 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
243 struct task_struct *next, int flags);
244
245 void tracing_record_cmdline(struct task_struct *task);
246 void tracing_record_tgid(struct task_struct *task);
247
248 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
249 __printf(3, 4);
250
251 struct event_filter;
252
253 enum trace_reg {
254 TRACE_REG_REGISTER,
255 TRACE_REG_UNREGISTER,
256 #ifdef CONFIG_PERF_EVENTS
257 TRACE_REG_PERF_REGISTER,
258 TRACE_REG_PERF_UNREGISTER,
259 TRACE_REG_PERF_OPEN,
260 TRACE_REG_PERF_CLOSE,
261 /*
262 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
263 * custom action was taken and the default action is not to be
264 * performed.
265 */
266 TRACE_REG_PERF_ADD,
267 TRACE_REG_PERF_DEL,
268 #endif
269 };
270
271 struct trace_event_call;
272
273 #define TRACE_FUNCTION_TYPE ((const char *)~0UL)
274
275 struct trace_event_fields {
276 const char *type;
277 union {
278 struct {
279 const char *name;
280 const int size;
281 const int align;
282 const unsigned int is_signed:1;
283 unsigned int needs_test:1;
284 const int filter_type;
285 const int len;
286 };
287 int (*define_fields)(struct trace_event_call *);
288 };
289 };
290
291 struct trace_event_class {
292 const char *system;
293 void *probe;
294 #ifdef CONFIG_PERF_EVENTS
295 void *perf_probe;
296 #endif
297 int (*reg)(struct trace_event_call *event,
298 enum trace_reg type, void *data);
299 struct trace_event_fields *fields_array;
300 struct list_head *(*get_fields)(struct trace_event_call *);
301 struct list_head fields;
302 int (*raw_init)(struct trace_event_call *);
303 };
304
305 extern int trace_event_reg(struct trace_event_call *event,
306 enum trace_reg type, void *data);
307
308 struct trace_event_buffer {
309 struct trace_buffer *buffer;
310 struct ring_buffer_event *event;
311 struct trace_event_file *trace_file;
312 void *entry;
313 unsigned int trace_ctx;
314 struct pt_regs *regs;
315 };
316
317 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
318 struct trace_event_file *trace_file,
319 unsigned long len);
320
321 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
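
/*
 * Sketch of the reserve/fill/commit pattern built on the two helpers
 * above (roughly what a generated trace event probe does). The entry
 * type "struct my_event_entry" and its "value" field are hypothetical;
 * real entries begin with the common struct trace_entry header.
 *
 *	static void my_probe(struct trace_event_file *trace_file,
 *			     unsigned long value)
 *	{
 *		struct trace_event_buffer fbuffer;
 *		struct my_event_entry *entry;
 *
 *		entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *						   sizeof(*entry));
 *		if (!entry)
 *			return;
 *
 *		entry->value = value;
 *		trace_event_buffer_commit(&fbuffer);
 *	}
 */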
322
323 enum {
324 TRACE_EVENT_FL_FILTERED_BIT,
325 TRACE_EVENT_FL_CAP_ANY_BIT,
326 TRACE_EVENT_FL_NO_SET_FILTER_BIT,
327 TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
328 TRACE_EVENT_FL_TRACEPOINT_BIT,
329 TRACE_EVENT_FL_DYNAMIC_BIT,
330 TRACE_EVENT_FL_KPROBE_BIT,
331 TRACE_EVENT_FL_UPROBE_BIT,
332 TRACE_EVENT_FL_EPROBE_BIT,
333 TRACE_EVENT_FL_FPROBE_BIT,
334 TRACE_EVENT_FL_CUSTOM_BIT,
335 TRACE_EVENT_FL_TEST_STR_BIT,
336 };
337
338 /*
339 * Event flags:
340 * FILTERED - The event has a filter attached
341 * CAP_ANY - Any user can enable for perf
342 * NO_SET_FILTER - Set when filter has error and is to be ignored
343 * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
344 * TRACEPOINT - Event is a tracepoint
345 * DYNAMIC - Event is a dynamic event (created at run time)
346 * KPROBE - Event is a kprobe
347 * UPROBE - Event is a uprobe
348 * EPROBE - Event is an event probe
349 * FPROBE - Event is a function probe
350 * CUSTOM - Event is a custom event (to be attached to an existing tracepoint)
351 * This is set when the custom event has not been attached
352 * to a tracepoint yet, then it is cleared when it is.
353 * TEST_STR - The event has a "%s" that points to a string outside the event
354 */
355 enum {
356 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
357 TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
358 TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
359 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
360 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
361 TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
362 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
363 TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
364 TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
365 TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
366 TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
367 TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT),
368 };
369
370 #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
371
372 struct trace_event_call {
373 struct list_head list;
374 struct trace_event_class *class;
375 union {
376 const char *name;
377 /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
378 struct tracepoint *tp;
379 };
380 struct trace_event event;
381 char *print_fmt;
382 struct event_filter *filter;
383 /*
384 * Static events can disappear with modules,
385 * whereas dynamic ones need their own ref count.
386 */
387 union {
388 void *module;
389 atomic_t refcnt;
390 };
391 void *data;
392
393 /* See the TRACE_EVENT_FL_* flags above */
394 int flags; /* static flags of different events */
395
396 #ifdef CONFIG_PERF_EVENTS
397 int perf_refcount;
398 struct hlist_head __percpu *perf_events;
399 struct bpf_prog_array __rcu *prog_array;
400
401 int (*perf_perm)(struct trace_event_call *,
402 struct perf_event *);
403 #endif
404 };
405
406 #ifdef CONFIG_DYNAMIC_EVENTS
407 bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
408 void trace_event_dyn_put_ref(struct trace_event_call *call);
409 bool trace_event_dyn_busy(struct trace_event_call *call);
410 #else
411 static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
412 {
413 /* Without DYNAMIC_EVENTS configured, nothing should be calling this */
414 return false;
415 }
416 static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
417 {
418 }
419 static inline bool trace_event_dyn_busy(struct trace_event_call *call)
420 {
421 /* Nothing should call this without DYNAMIC_EVENTS configured. */
422 return true;
423 }
424 #endif
425
426 static inline bool trace_event_try_get_ref(struct trace_event_call *call)
427 {
428 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
429 return trace_event_dyn_try_get_ref(call);
430 else
431 return try_module_get(call->module);
432 }
433
434 static inline void trace_event_put_ref(struct trace_event_call *call)
435 {
436 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
437 trace_event_dyn_put_ref(call);
438 else
439 module_put(call->module);
440 }
441
442 #ifdef CONFIG_PERF_EVENTS
443 static inline bool bpf_prog_array_valid(struct trace_event_call *call)
444 {
445 /*
446 * This inline function checks whether call->prog_array
447 * is valid or not. The function is called in various places,
448 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
449 *
450 * If this function returns true, and later call->prog_array
451 * becomes NULL inside the rcu_read_lock/unlock region,
452 * we bail out then. If this function returns false,
453 * there is a risk that we might miss a few events if the checking
454 * were delayed until inside rcu_read_lock/unlock region and
455 * call->prog_array happened to become non-NULL then.
456 *
457 * Here, READ_ONCE() is used instead of rcu_access_pointer().
458 * rcu_access_pointer() requires the actual definition of
459 * "struct bpf_prog_array" while READ_ONCE() only needs
460 * a declaration of the same type.
461 */
462 return !!READ_ONCE(call->prog_array);
463 }
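
/*
 * Typical (simplified) call-site sketch: a perf probe can use this check
 * to bail out early when no BPF programs are attached, e.g.
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;
 *
 * The authoritative check still happens under RCU inside trace_call_bpf().
 */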
464 #endif
465
466 static inline const char *
467 trace_event_name(struct trace_event_call *call)
468 {
469 if (call->flags & TRACE_EVENT_FL_CUSTOM)
470 return call->name;
471 else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
472 return call->tp ? call->tp->name : NULL;
473 else
474 return call->name;
475 }
476
477 static inline struct list_head *
478 trace_get_fields(struct trace_event_call *event_call)
479 {
480 if (!event_call->class->get_fields)
481 return &event_call->class->fields;
482 return event_call->class->get_fields(event_call);
483 }
484
485 struct trace_subsystem_dir;
486
487 enum {
488 EVENT_FILE_FL_ENABLED_BIT,
489 EVENT_FILE_FL_RECORDED_CMD_BIT,
490 EVENT_FILE_FL_RECORDED_TGID_BIT,
491 EVENT_FILE_FL_FILTERED_BIT,
492 EVENT_FILE_FL_NO_SET_FILTER_BIT,
493 EVENT_FILE_FL_SOFT_MODE_BIT,
494 EVENT_FILE_FL_SOFT_DISABLED_BIT,
495 EVENT_FILE_FL_TRIGGER_MODE_BIT,
496 EVENT_FILE_FL_TRIGGER_COND_BIT,
497 EVENT_FILE_FL_PID_FILTER_BIT,
498 EVENT_FILE_FL_WAS_ENABLED_BIT,
499 EVENT_FILE_FL_FREED_BIT,
500 };
501
502 extern struct trace_event_file *trace_get_event_file(const char *instance,
503 const char *system,
504 const char *event);
505 extern void trace_put_event_file(struct trace_event_file *file);
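
/*
 * Illustrative sketch (hypothetical caller): look up an event file in the
 * top-level trace instance (NULL instance name), use it, and drop the
 * reference when done.
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	... use the file, e.g. as the target of synth_event_trace() ...
 *
 *	trace_put_event_file(file);
 */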
506
507 #define MAX_DYNEVENT_CMD_LEN (2048)
508
509 enum dynevent_type {
510 DYNEVENT_TYPE_SYNTH = 1,
511 DYNEVENT_TYPE_KPROBE,
512 DYNEVENT_TYPE_NONE,
513 };
514
515 struct dynevent_cmd;
516
517 typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
518
519 struct dynevent_cmd {
520 struct seq_buf seq;
521 const char *event_name;
522 unsigned int n_fields;
523 enum dynevent_type type;
524 dynevent_create_fn_t run_command;
525 void *private_data;
526 };
527
528 extern int dynevent_create(struct dynevent_cmd *cmd);
529
530 extern int synth_event_delete(const char *name);
531
532 extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
533 char *buf, int maxlen);
534
535 extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
536 const char *name,
537 struct module *mod, ...);
538
539 #define synth_event_gen_cmd_start(cmd, name, mod, ...) \
540 __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
541
542 struct synth_field_desc {
543 const char *type;
544 const char *name;
545 };
546
547 extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
548 const char *name,
549 struct module *mod,
550 struct synth_field_desc *fields,
551 unsigned int n_fields);
552 extern int synth_event_create(const char *name,
553 struct synth_field_desc *fields,
554 unsigned int n_fields, struct module *mod);
555
556 extern int synth_event_add_field(struct dynevent_cmd *cmd,
557 const char *type,
558 const char *name);
559 extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
560 const char *type_name);
561 extern int synth_event_add_fields(struct dynevent_cmd *cmd,
562 struct synth_field_desc *fields,
563 unsigned int n_fields);
564
565 #define synth_event_gen_cmd_end(cmd) \
566 dynevent_create(cmd)
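
/*
 * Sketch of generating a synthetic event from kernel/module code with the
 * command API above. The event name and field list are purely
 * illustrative.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "mod_synth_test", THIS_MODULE,
 *					"pid_t", "next_pid",
 *					"u64", "ts_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "unsigned int", "irq");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */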
567
568 struct synth_event;
569
570 struct synth_event_trace_state {
571 struct trace_event_buffer fbuffer;
572 struct synth_trace_event *entry;
573 struct trace_buffer *buffer;
574 struct synth_event *event;
575 unsigned int cur_field;
576 unsigned int n_u64;
577 bool disabled;
578 bool add_next;
579 bool add_name;
580 };
581
582 extern int synth_event_trace(struct trace_event_file *file,
583 unsigned int n_vals, ...);
584 extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
585 unsigned int n_vals);
586 extern int synth_event_trace_start(struct trace_event_file *file,
587 struct synth_event_trace_state *trace_state);
588 extern int synth_event_add_next_val(u64 val,
589 struct synth_event_trace_state *trace_state);
590 extern int synth_event_add_val(const char *field_name, u64 val,
591 struct synth_event_trace_state *trace_state);
592 extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
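
/*
 * Sketch of tracing a synthetic event piecewise with the API above. The
 * values are illustrative and "file" would normally come from
 * trace_get_event_file(). synth_event_add_next_val() adds values in field
 * order; synth_event_add_val() adds them by name (the two styles should
 * not be mixed within a single trace).
 *
 *	struct synth_event_trace_state state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(777, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(1000, &state);
 *
 *	return synth_event_trace_end(&state);
 */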
593
594 extern int kprobe_event_delete(const char *name);
595
596 extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
597 char *buf, int maxlen);
598
599 #define kprobe_event_gen_cmd_start(cmd, name, loc, ...) \
600 __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
601
602 #define kretprobe_event_gen_cmd_start(cmd, name, loc, ...) \
603 __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
604
605 extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
606 bool kretprobe,
607 const char *name,
608 const char *loc, ...);
609
610 #define kprobe_event_add_fields(cmd, ...) \
611 __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
612
613 #define kprobe_event_add_field(cmd, field) \
614 __kprobe_event_add_fields(cmd, field, NULL)
615
616 extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
617
618 #define kprobe_event_gen_cmd_end(cmd) \
619 dynevent_create(cmd)
620
621 #define kretprobe_event_gen_cmd_end(cmd) \
622 dynevent_create(cmd)
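
/*
 * Sketch of generating a kprobe event from kernel/module code with the
 * command API above. The probe location and fetch arguments are
 * illustrative only.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "flags=%cx",
 *					      "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */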
623
624 /*
625 * Event file flags:
626 * ENABLED - The event is enabled
627 * RECORDED_CMD - The comms should be recorded at sched_switch
628 * RECORDED_TGID - The tgids should be recorded at sched_switch
629 * FILTERED - The event has a filter attached
630 * NO_SET_FILTER - Set when filter has error and is to be ignored
631 * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
632 * SOFT_DISABLED - When set, do not trace the event (even though its
633 * tracepoint may be enabled)
634 * TRIGGER_MODE - When set, invoke the triggers associated with the event
635 * TRIGGER_COND - When set, one or more triggers have an associated filter
636 * PID_FILTER - When set, the event is filtered based on pid
637 * WAS_ENABLED - Set when enabled to know to clear trace on module removal
638 * FREED - File descriptor is freed, all fields should be considered invalid
639 */
640 enum {
641 EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
642 EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
643 EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
644 EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
645 EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
646 EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
647 EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
648 EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
649 EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
650 EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
651 EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
652 EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
653 };
654
655 struct trace_event_file {
656 struct list_head list;
657 struct trace_event_call *event_call;
658 struct event_filter __rcu *filter;
659 struct eventfs_inode *ei;
660 struct trace_array *tr;
661 struct trace_subsystem_dir *system;
662 struct list_head triggers;
663
664 /*
665 * 32 bit flags:
666 * bit 0: enabled
667 * bit 1: enabled cmd record
668 * bit 2: enable/disable with the soft disable bit
669 * bit 3: soft disabled
670 * bit 4: trigger enabled
671 *
672 * Note: The bits must be set atomically to prevent races
673 * from other writers. Reads of flags do not need to be in
674 * sync as they occur in critical sections. But the way flags
675 * is currently used, these changes do not affect the code
676 * except that when a change is made, it may take a short
677 * time to propagate to other CPUs due to caching and such,
678 * which is mostly OK ;-)
679 */
680 unsigned long flags;
681 atomic_t ref; /* ref count for opened files */
682 atomic_t sm_ref; /* soft-mode reference counter */
683 atomic_t tm_ref; /* trigger-mode reference counter */
684 };
685
686 #define __TRACE_EVENT_FLAGS(name, value) \
687 static int __init trace_init_flags_##name(void) \
688 { \
689 event_##name.flags |= value; \
690 return 0; \
691 } \
692 early_initcall(trace_init_flags_##name);
693
694 #define __TRACE_EVENT_PERF_PERM(name, expr...) \
695 static int perf_perm_##name(struct trace_event_call *tp_event, \
696 struct perf_event *p_event) \
697 { \
698 return ({ expr; }); \
699 } \
700 static int __init trace_init_perf_perm_##name(void) \
701 { \
702 event_##name.perf_perm = &perf_perm_##name; \
703 return 0; \
704 } \
705 early_initcall(trace_init_perf_perm_##name);
706
707 #define PERF_MAX_TRACE_SIZE 8192
708
709 #define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */
710
711 enum event_trigger_type {
712 ETT_NONE = (0),
713 ETT_TRACE_ONOFF = (1 << 0),
714 ETT_SNAPSHOT = (1 << 1),
715 ETT_STACKTRACE = (1 << 2),
716 ETT_EVENT_ENABLE = (1 << 3),
717 ETT_EVENT_HIST = (1 << 4),
718 ETT_HIST_ENABLE = (1 << 5),
719 ETT_EVENT_EPROBE = (1 << 6),
720 };
721
722 extern int filter_match_preds(struct event_filter *filter, void *rec);
723
724 extern enum event_trigger_type
725 event_triggers_call(struct trace_event_file *file,
726 struct trace_buffer *buffer, void *rec,
727 struct ring_buffer_event *event);
728 extern void
729 event_triggers_post_call(struct trace_event_file *file,
730 enum event_trigger_type tt);
731
732 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
733
734 bool __trace_trigger_soft_disabled(struct trace_event_file *file);
735
736 /**
737 * trace_trigger_soft_disabled - do triggers and test if soft disabled
738 * @file: The file pointer of the event to test
739 *
740 * If any triggers without filters are attached to this event, they
741 * will be called here. If the event is soft disabled and has no
742 * triggers that require testing the fields, it will return true,
743 * otherwise false.
744 */
745 static __always_inline bool
746 trace_trigger_soft_disabled(struct trace_event_file *file)
747 {
748 unsigned long eflags = file->flags;
749
750 if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
751 EVENT_FILE_FL_SOFT_DISABLED |
752 EVENT_FILE_FL_PID_FILTER))))
753 return false;
754
755 if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
756 return false;
757
758 return __trace_trigger_soft_disabled(file);
759 }
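
/*
 * Sketch of the usual call site: a trace event probe bails out before
 * reserving ring buffer space when this returns true, e.g.
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 */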
760
761 #ifdef CONFIG_BPF_EVENTS
762 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
763 int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
764 void perf_event_detach_bpf_prog(struct perf_event *event);
765 int perf_event_query_prog_array(struct perf_event *event, void __user *info);
766 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
767 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
768 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
769 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
770 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
771 u32 *fd_type, const char **buf,
772 u64 *probe_offset, u64 *probe_addr,
773 unsigned long *missed);
774 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
775 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
776 #else
777 static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
778 {
779 return 1;
780 }
781
782 static inline int
783 perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
784 {
785 return -EOPNOTSUPP;
786 }
787
788 static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
789
790 static inline int
791 perf_event_query_prog_array(struct perf_event *event, void __user *info)
792 {
793 return -EOPNOTSUPP;
794 }
795 static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
796 {
797 return -EOPNOTSUPP;
798 }
799 static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
800 {
801 return -EOPNOTSUPP;
802 }
803 static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
804 {
805 return NULL;
806 }
807 static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
808 {
809 }
810 static inline int bpf_get_perf_event_info(const struct perf_event *event,
811 u32 *prog_id, u32 *fd_type,
812 const char **buf, u64 *probe_offset,
813 u64 *probe_addr, unsigned long *missed)
814 {
815 return -EOPNOTSUPP;
816 }
817 static inline int
818 bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
819 {
820 return -EOPNOTSUPP;
821 }
822 static inline int
823 bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
824 {
825 return -EOPNOTSUPP;
826 }
827 #endif
828
829 enum {
830 FILTER_OTHER = 0,
831 FILTER_STATIC_STRING,
832 FILTER_DYN_STRING,
833 FILTER_RDYN_STRING,
834 FILTER_PTR_STRING,
835 FILTER_TRACE_FN,
836 FILTER_CPUMASK,
837 FILTER_COMM,
838 FILTER_CPU,
839 FILTER_STACKTRACE,
840 };
841
842 extern int trace_event_raw_init(struct trace_event_call *call);
843 extern int trace_define_field(struct trace_event_call *call, const char *type,
844 const char *name, int offset, int size,
845 int is_signed, int filter_type);
846 extern int trace_add_event_call(struct trace_event_call *call);
847 extern int trace_remove_event_call(struct trace_event_call *call);
848 extern int trace_event_get_offsets(struct trace_event_call *call);
849
850 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
851 int trace_set_clr_event(const char *system, const char *event, int set);
852 int trace_array_set_clr_event(struct trace_array *tr, const char *system,
853 const char *event, bool enable);
854 /*
855 * The double __builtin_constant_p is because gcc will give us an error
856 * if we try to initialize the static variable with fmt when it is not a
857 * constant, even though the outer if statement would be optimized out.
858 */
859 #define event_trace_printk(ip, fmt, args...) \
860 do { \
861 __trace_printk_check_format(fmt, ##args); \
862 tracing_record_cmdline(current); \
863 if (__builtin_constant_p(fmt)) { \
864 static const char *trace_printk_fmt \
865 __section("__trace_printk_fmt") = \
866 __builtin_constant_p(fmt) ? fmt : NULL; \
867 \
868 __trace_bprintk(ip, trace_printk_fmt, ##args); \
869 } else \
870 __trace_printk(ip, fmt, ##args); \
871 } while (0)
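
/*
 * Illustrative use (the format string and argument are hypothetical);
 * _THIS_IP_ records the caller's instruction pointer for the trace output:
 *
 *	event_trace_printk(_THIS_IP_, "request %d completed\n", req_id);
 */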
872
873 #ifdef CONFIG_PERF_EVENTS
874 struct perf_event;
875
876 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
877
878 extern int perf_trace_init(struct perf_event *event);
879 extern void perf_trace_destroy(struct perf_event *event);
880 extern int perf_trace_add(struct perf_event *event, int flags);
881 extern void perf_trace_del(struct perf_event *event, int flags);
882 #ifdef CONFIG_KPROBE_EVENTS
883 extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
884 extern void perf_kprobe_destroy(struct perf_event *event);
885 extern int bpf_get_kprobe_info(const struct perf_event *event,
886 u32 *fd_type, const char **symbol,
887 u64 *probe_offset, u64 *probe_addr,
888 unsigned long *missed,
889 bool perf_type_tracepoint);
890 #endif
891 #ifdef CONFIG_UPROBE_EVENTS
892 extern int perf_uprobe_init(struct perf_event *event,
893 unsigned long ref_ctr_offset, bool is_retprobe);
894 extern void perf_uprobe_destroy(struct perf_event *event);
895 extern int bpf_get_uprobe_info(const struct perf_event *event,
896 u32 *fd_type, const char **filename,
897 u64 *probe_offset, u64 *probe_addr,
898 bool perf_type_tracepoint);
899 #endif
900 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
901 char *filter_str);
902 extern void ftrace_profile_free_filter(struct perf_event *event);
903 void perf_trace_buf_update(void *record, u16 type);
904 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
905
906 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
907 void perf_event_free_bpf_prog(struct perf_event *event);
908
909 void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
910 void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
911 void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
912 u64 arg3);
913 void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
914 u64 arg3, u64 arg4);
915 void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
916 u64 arg3, u64 arg4, u64 arg5);
917 void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
918 u64 arg3, u64 arg4, u64 arg5, u64 arg6);
919 void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
920 u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
921 void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
922 u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
923 u64 arg8);
924 void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
925 u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
926 u64 arg8, u64 arg9);
927 void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
928 u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
929 u64 arg8, u64 arg9, u64 arg10);
930 void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
931 u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
932 u64 arg8, u64 arg9, u64 arg10, u64 arg11);
933 void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
934 u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
935 u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
936 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
937 struct trace_event_call *call, u64 count,
938 struct pt_regs *regs, struct hlist_head *head,
939 struct task_struct *task);
940
941 static inline void
942 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
943 u64 count, struct pt_regs *regs, void *head,
944 struct task_struct *task)
945 {
946 perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
947 }
948
949 #endif
950
951 #define TRACE_EVENT_STR_MAX 512
952
953 /*
954 * gcc warns that you cannot use a va_list in an inlined
955 * function, but lets us make it into a macro :-/
956 */
957 #define __trace_event_vstr_len(fmt, va) \
958 ({ \
959 va_list __ap; \
960 int __ret; \
961 \
962 va_copy(__ap, *(va)); \
963 __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \
964 va_end(__ap); \
965 \
966 min(__ret, TRACE_EVENT_STR_MAX); \
967 })
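
/*
 * Illustrative sketch (hypothetical helper): size a formatted string from
 * inside a varargs function; note that the macro takes a pointer to the
 * va_list.
 *
 *	void record_msg(const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int len;
 *
 *		va_start(ap, fmt);
 *		len = __trace_event_vstr_len(fmt, &ap);
 *		va_end(ap);
 *
 *		... reserve "len" bytes and vsnprintf() into them ...
 *	}
 */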
968
969 #endif /* _LINUX_TRACE_EVENT_H */
970
971 /*
972 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
973 * This is due to the way trace custom events work. If a file includes two
974 * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
975 * will override the TRACE_CUSTOM_EVENT and break the second include.
976 */
977
978 #ifndef TRACE_CUSTOM_EVENT
979
980 #define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
981 #define DEFINE_CUSTOM_EVENT(template, name, proto, args)
982 #define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
983
984 #endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */
985