xref: /openbmc/linux/tools/perf/util/mmap.c (revision 7ed1c1901fe52e6c5828deb155920b44b0adabb1)
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

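/*
 * The mapping consists of one control/metadata page (struct
 * perf_event_mmap_page) followed by a power-of-two sized data area;
 * map->mask is the data area size minus one, so the total length is
 * the data size (mask + 1) plus one extra page.
 */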
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a valid entry. */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Legacy interface for mmap read.
 * Don't use it. Use perf_mmap__read_event().
 */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	u64 head;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, &map->prev, head);
}
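
/*
 * Illustrative sketch only (not part of the original file): how legacy
 * callers typically drive perf_mmap__read_forward(), pairing each returned
 * event with perf_mmap__consume() so the kernel can reuse the space:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_mmap__read_forward(map)) != NULL) {
 *		... deliver the event to the tool ...
 *		perf_mmap__consume(map, false);
 *	}
 */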

/*
 * Read events from the ring buffer one by one.
 * Each call returns one event.
 *
 * Usage:
 * perf_mmap__read_init()
 * while ((event = perf_mmap__read_event()) != NULL) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
					bool overwrite,
					u64 *startp, u64 end)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	if (startp == NULL)
		return NULL;

	/* The non-overwrite mode doesn't pause the ring buffer */
	if (!overwrite)
		end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, startp, end);

	if (!overwrite)
		map->prev = *startp;

	return event;
}
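
/*
 * Illustrative sketch only (not part of the original file): the usage
 * sequence above spelled out for the non-overwrite case, with the
 * arguments filled in. 'handle_event' stands for whatever processing the
 * caller does with each event.
 *
 *	u64 start, end;
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map, false, &start, &end) < 0)
 *		return;
 *	while ((event = perf_mmap__read_event(map, false, &start, end)) != NULL) {
 *		handle_event(event);
 *		perf_mmap__consume(map, false);
 *	}
 *	perf_mmap__read_done(map);
 */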

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

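/*
 * Tell the kernel how far we have read (non-overwrite mode only, by moving
 * the data tail) and drop our reference once the map is empty and ours is
 * the only reference left, e.g. after the fd got a POLLHUP.
 */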
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
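
/*
 * Illustrative sketch only (not part of the original file): setting up
 * mmap_params for a buffer of 'nr_pages' data pages (must be a power of
 * two) and mapping the fd obtained from perf_event_open(). 'nr_pages' and
 * 'fd' are placeholders supplied by the caller.
 *
 *	struct mmap_params mp = {
 *		.prot = PROT_READ | PROT_WRITE,
 *		.mask = nr_pages * page_size - 1,
 *	};
 *
 *	if (perf_mmap__mmap(map, &mp, fd) < 0)
 *		return -1;
 */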

static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
			 u64 *startp, u64 *endp)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	*startp = overwrite ? head : old;
	*endp = overwrite ? old : head;

	if (*startp == *endp)
		return -EAGAIN;

	size = *endp - *startp;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__push(struct perf_mmap *md, bool overwrite,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 end, start;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md, overwrite, &start, &end);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = end - start;

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite);
out:
	return rc;
}
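
/*
 * Illustrative sketch only (not part of the original file): a minimal push
 * callback that appends each chunk to a file descriptor, in the spirit of
 * what 'perf record' does. 'write_output' is a hypothetical helper name.
 *
 *	static int write_output(void *to, void *buf, size_t size)
 *	{
 *		int fd = *(int *)to;
 *
 *		while (size) {
 *			ssize_t ret = write(fd, buf, size);
 *
 *			if (ret < 0)
 *				return -1;
 *			buf += ret;
 *			size -= ret;
 *		}
 *		return 0;
 *	}
 *
 *	...
 *	perf_mmap__push(md, false, &fd, write_output);
 */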

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward; the last perf_mmap__read()
 * will set the tail to map->prev. Correct map->prev to head, which is the
 * end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	map->prev = perf_mmap__read_head(map);
}
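
/*
 * Illustrative sketch only (not part of the original file): reading an
 * overwrite (backward) ring buffer, assuming the caller has already paused
 * it. read_init() fixes the [start, end) window (trimming it via
 * overwrite_rb_find_range() if the buffer wrapped), and read_done() resets
 * map->prev for the next pass. 'handle_event' stands for the caller's
 * processing.
 *
 *	u64 start, end;
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map, true, &start, &end) < 0)
 *		return;
 *	while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
 *		handle_event(event);
 *		perf_mmap__consume(map, true);
 *	}
 *	perf_mmap__read_done(map);
 */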
349