// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <errno.h>
#include <string.h>
#include <linux/ring_buffer.h>
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
#include "internal.h"

void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
		     bool overwrite, libperf_unmap_cb_t unmap_cb)
{
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb  = unmap_cb;
	refcount_set(&map->refcnt, 0);
	if (prev)
		prev->next = map;
}

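/*
 * The mapping is one struct perf_event_mmap_page control page followed
 * by a power-of-two data area of map->mask + 1 bytes.
 */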
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, int cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd  = fd;
	map->cpu = cpu;
	return 0;
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map && map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map && map->unmap_cb)
		map->unmap_cb(map);
}

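/*
 * The refcount starts at 0 (see perf_mmap__init()); the owner takes a
 * reference with perf_mmap__get(). perf_mmap__put() unmaps the buffer
 * when the last reference is dropped, and perf_mmap__consume() drops
 * the final reference itself once the ring buffer is empty.
 */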
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}

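/*
 * Writing the old read position back as the ring buffer tail tells the
 * kernel that this space can be reused for new events. Only
 * non-overwrite mode maintains a tail; overwrite mode has nothing to
 * advance.
 */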
void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

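/*
 * Scan an overwrite (backward) ring buffer from 'start' to find where
 * the readable data ends: either the walk covers one full buffer worth
 * of data and rewinds, or it reaches a zero-sized header, i.e. space
 * that was never written.
 */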
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * The backward ring buffer is full, but we still have a
		 * chance to read most of the data in it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

/*
 * Mandatory for overwrite mode.
 * Reads in overwrite mode run backward, and the last perf_mmap__read()
 * leaves the tail at map->prev. map->prev must be corrected to head,
 * which is where the next read ends.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
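
/*
 * Overwrite-mode read cycle, as a hedged sketch: the caller is assumed
 * to pause the ring buffer around the read (e.g. with the
 * PERF_EVENT_IOC_PAUSE_OUTPUT ioctl) so the kernel cannot overwrite
 * events while they are being parsed. 'process_event' is a
 * hypothetical, user-supplied handler:
 *
 *	union perf_event *event;
 *
 *	// ring buffer paused by the caller
 *	if (!perf_mmap__read_init(map)) {
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			process_event(event);
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);	// resets map->prev to head
 *	}
 *	// ring buffer resumed by the caller
 */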

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Returns one event per call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while ((event = perf_mmap__read_event())) {
 *	// process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 *
 * A fuller sketch appears at the end of this file.
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite mode doesn't pause the ring buffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
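
/*
 * Fuller usage sketch (a minimal, hedged example, not compiled here).
 * It assumes 'map' was set up with perf_mmap__mmap() on a non-overwrite
 * ring buffer, and that 'drain' and 'process_event' are hypothetical,
 * caller-defined names:
 *
 *	#include <perf/mmap.h>
 *	#include <perf/event.h>
 *
 *	int drain(struct perf_mmap *map)
 *	{
 *		union perf_event *event;
 *		int err = perf_mmap__read_init(map);
 *
 *		if (err)
 *			return err == -EAGAIN ? 0 : err;	// -EAGAIN: nothing to read yet
 *
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			process_event(event);
 *			perf_mmap__consume(map);
 *		}
 *
 *		perf_mmap__read_done(map);
 *		return 0;
 *	}
 */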
276