/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non-zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				 Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
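
/*
 * Worked example of the data-record layout above (the numbers follow
 * directly from the rules in the comment; this is illustration only):
 * a 20-byte payload is stored with type_len = 5, since length = 5 << 2 = 20;
 * the data occupies array[0..4] and the total event size is 4 + 20 = 24
 * bytes.  A payload too large to express in type_len is stored with
 * type_len = 0 and its actual length in array[0].
 */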

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);
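
/*
 * Example use (a minimal sketch; @event would normally come from one of
 * the peek/consume/iterator functions declared below, and handle_entry()
 * is a hypothetical consumer):
 *
 *  void *data = ring_buffer_event_data(event);
 *  unsigned int len = ring_buffer_event_length(event);
 *
 *  handle_entry(data, len);
 *
 * Using these accessors avoids decoding the type_len/array layout by hand.
 */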

/*
 * ring_buffer_discard_commit will remove an event that has not
 *   been committed yet. If this is used, then ring_buffer_unlock_commit
 *   must not be called on the discarded event. This function
 *   will try to remove the event from the ring buffer completely
 *   if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per-CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
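
/*
 * Example use (a minimal sketch; the buffer size and the overwrite flag,
 * RB_FL_OVERWRITE defined below, are arbitrary choices for illustration):
 *
 *  struct trace_buffer *buffer;
 *
 *  buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *  if (!buffer)
 *    return -ENOMEM;
 *  ...
 *  ring_buffer_free(buffer);
 */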

typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
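
/*
 * Example use (a hedged sketch of a ->poll() file operation; keeping the
 * trace_buffer in ->private_data and the name my_poll() are hypothetical,
 * and RING_BUFFER_ALL_CPUS is defined just below):
 *
 *  static __poll_t my_poll(struct file *filp, poll_table *wait)
 *  {
 *    struct trace_buffer *buffer = filp->private_data;
 *
 *    return ring_buffer_poll_wait(buffer, RING_BUFFER_ALL_CPUS,
 *                                 filp, wait, 0);
 *  }
 */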

#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
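
/*
 * Example use (a minimal sketch of the reserve/commit path; struct my_entry,
 * its field and the -EBUSY return are hypothetical):
 *
 *  struct ring_buffer_event *event;
 *  struct my_entry *entry;
 *
 *  event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *  if (!event)
 *    return -EBUSY;
 *  entry = ring_buffer_event_data(event);
 *  entry->value = 42;
 *  ring_buffer_unlock_commit(buffer);
 *
 * ring_buffer_write() performs the reserve, copy and commit in one call.
 */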

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);
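
/*
 * Example use (a minimal sketch of a consuming read from one CPU; each
 * event is removed from the buffer as it is read, and process_event() is
 * a hypothetical handler):
 *
 *  struct ring_buffer_event *event;
 *  unsigned long lost_events;
 *  u64 ts;
 *
 *  while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events)))
 *    process_event(ring_buffer_event_data(event), ts, lost_events);
 *
 * ring_buffer_peek() returns the next event without consuming it.
 */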

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
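
/*
 * Example use (a minimal sketch of a non-consuming read; the iterator
 * leaves the buffer contents in place):
 *
 *  struct ring_buffer_iter *iter;
 *  struct ring_buffer_event *event;
 *  u64 ts;
 *
 *  iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *  if (!iter)
 *    return -ENOMEM;
 *  ring_buffer_read_prepare_sync();
 *  ring_buffer_read_start(iter);
 *  while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *    ...
 *    ring_buffer_iter_advance(iter);
 *  }
 *  ring_buffer_read_finish(iter);
 */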

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
			  size_t len, int cpu, int full);
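
/*
 * Example use (a minimal sketch of the page-based read path used by
 * splice-style readers; consume_page() is a hypothetical handler and the
 * error handling is simplified):
 *
 *  void *page;
 *  int ret;
 *
 *  page = ring_buffer_alloc_read_page(buffer, cpu);
 *  if (IS_ERR(page))
 *    return PTR_ERR(page);
 *  ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *  if (ret >= 0)
 *    consume_page(page, PAGE_SIZE);
 *  ring_buffer_free_read_page(buffer, cpu, page);
 */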

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

#endif /* _LINUX_RING_BUFFER_H */