xref: /openbmc/linux/tools/perf/util/mmap.h (revision 45cc842d5b75ba8f9a958f2dd12b95c6dd0452bd)
1 #ifndef __PERF_MMAP_H
2 #define __PERF_MMAP_H 1
3 
4 #include <linux/compiler.h>
5 #include <linux/refcount.h>
6 #include <linux/types.h>
7 #include <asm/barrier.h>
8 #include <stdbool.h>
9 #include "auxtrace.h"
10 #include "event.h"
11 
12 /**
13  * struct perf_mmap - perf's ring buffer mmap details
14  *
15  * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
16  */
17 struct perf_mmap {
18 	void		 *base;
19 	int		 mask;
20 	int		 fd;
21 	refcount_t	 refcnt;
22 	u64		 prev;
23 	struct auxtrace_mmap auxtrace_mmap;
24 	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
25 };
26 
27 /*
28  * State machine of bkw_mmap_state:
29  *
30  *                     .________________(forbid)_____________.
31  *                     |                                     V
32  * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
33  *                     ^  ^              |   ^               |
34  *                     |  |__(forbid)____/   |___(forbid)___/|
35  *                     |                                     |
36  *                      \_________________(3)_______________/
37  *
38  * NOTREADY     : Backward ring buffers are not ready
39  * RUNNING      : Backward ring buffers are recording
40  * DATA_PENDING : We are required to collect data from backward ring buffers
41  * EMPTY        : We have collected data from backward ring buffers.
42  *
43  * (0): Setup backward ring buffer
44  * (1): Pause ring buffers for reading
45  * (2): Read from ring buffers
46  * (3): Resume ring buffers for recording
47  */
/* States of the backward ring buffers; legal transitions are diagrammed above. */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,	/* backward ring buffers are not ready */
	BKW_MMAP_RUNNING,	/* backward ring buffers are recording */
	BKW_MMAP_DATA_PENDING,	/* data must be collected from the buffers */
	BKW_MMAP_EMPTY,		/* data has been collected from the buffers */
};
54 
/*
 * Parameters for perf_mmap__mmap(): mmap protection flags (@prot), the
 * ring offset mask (@mask, presumably mirroring perf_mmap::mask — confirm),
 * and the parameters for the AUX area mmap (@auxtrace_mp).
 */
struct mmap_params {
	int			    prot, mask;
	struct auxtrace_mmap_params auxtrace_mp;
};
59 
/* Map/unmap the ring buffer described by @mp over file descriptor @fd. */
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
void perf_mmap__munmap(struct perf_mmap *map);

/* Reference counting for maps shared e.g. via PERF_EVENT_IOC_SET_OUTPUT. */
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);

/*
 * Declare the events read so far consumed so their space can be reused;
 * presumably publishes the new tail via perf_mmap__write_tail() when not
 * in @overwrite mode — confirm in mmap.c.
 */
void perf_mmap__consume(struct perf_mmap *map, bool overwrite);

/* Catch the reader up with the current ring position — see mmap.c. */
void perf_mmap__read_catchup(struct perf_mmap *md);
69 
/*
 * Read the kernel's producer position (data_head) from the control page.
 *
 * READ_ONCE() keeps the compiler from tearing or re-reading the load, and
 * the rmb() orders it before any subsequent loads of ring data, pairing
 * with the barrier the kernel issues before publishing data_head (see the
 * mmap ring buffer ABI in perf_event_open(2)).
 */
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = READ_ONCE(pc->data_head);
	rmb();
	return head;
}
77 
/*
 * Publish the consumer position: tell the kernel that everything up to
 * @tail has been read, so that space in the ring may be reused.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * Full barrier: ensure all reads of ring data are done before we
	 * write the tail out — otherwise the kernel could overwrite records
	 * we have not finished reading.
	 */
	mb();
	pc->data_tail = tail;
}
88 
/*
 * Fetch the next event from a forward (normal) or backward (overwrite)
 * ring buffer; presumably returns NULL when no event is available —
 * confirm in mmap.c.
 */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);

/*
 * Drain the ring buffer, feeding each chunk of data to the @push callback
 * with @to as its context argument. NOTE(review): 0/negative return
 * convention presumed from kernel style — confirm in mmap.c.
 */
int perf_mmap__push(struct perf_mmap *md, bool backward,
		    void *to, int push(void *to, void *buf, size_t size));

/* Size in bytes of the mmap'ed region for @map. */
size_t perf_mmap__mmap_len(struct perf_mmap *map);
96 
97 #endif /*__PERF_MMAP_H */
98