xref: /openbmc/linux/tools/perf/util/mmap.c (revision 5927145e)
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

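/*
 * Total mmap size: one extra page at the start for the perf_event_mmap_page
 * control page, followed by the data pages (mask + 1 bytes).
 */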
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* The caller must pass an 'end' that points to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 start, u64 end, u64 *prev)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
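		/*
		 * For example (illustrative numbers): with mask + 1 == 4096,
		 * a 64 byte event starting at offset 4064 wraps around, so
		 * the copy loop below stitches it back together in
		 * map->event_copy: first the 32 bytes up to the end of the
		 * buffer, then the remaining 32 bytes from offset 0.
		 */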
		if ((start & map->mask) + size != ((start + size) & map->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	u64 head;
	u64 old = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, old, head, &map->prev);
}
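
/*
 * A typical consumer loop (a sketch only; real callers go through the
 * evlist layer) looks like:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_mmap__read_forward(map)) != NULL) {
 *		... process the event ...
 *		perf_mmap__consume(map, false);
 *	}
 */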

union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
	u64 head, end;
	u64 start = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);
	if (!head)
		return NULL;

	/*
	 * The backward ring buffer's 'head' starts at 0 and the kernel
	 * subtracts the size of each record it writes, so 'head' is in
	 * fact negative. The 'end' pointer is built by adding the size
	 * of the ring buffer to 'head', which means the valid data that
	 * can be read spans the whole ring buffer. If 'end' would be
	 * positive, the ring buffer has not been completely filled yet,
	 * so 'end' must be adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we cannot
	 * simply compare 'end' against 0. Instead we compare '-head',
	 * i.e. the number of bytes the kernel has written, against the
	 * size of the ring buffer.
	 */
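	/*
	 * Example with illustrative numbers, mask + 1 == 4096: after the
	 * kernel has written 3000 bytes, head == (u64)-3000 and -head ==
	 * 3000 < 4096, so end is clamped to 0; after 5000 bytes, -head ==
	 * 5000 >= 4096, so end = head + 4096, i.e. at most one full
	 * buffer's worth of the most recent data is readable.
	 */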
	if (-head < (u64)(map->mask + 1))
		end = 0;
	else
		end = head + map->mask + 1;

	return perf_mmap__read(map, start, end, &map->prev);
}

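/*
 * Bring the private read position (map->prev) up to date with the kernel's
 * current write head.
 */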
void perf_mmap__read_catchup(struct perf_mmap *map)
{
	u64 head;

	if (!refcount_read(&map->refcnt))
		return;

	head = perf_mmap__read_head(map);
	map->prev = head;
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

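/*
 * For a normal (non-overwrite) ring buffer, advance the tail so the kernel
 * knows everything up to map->prev has been consumed and the space can be
 * reused. Also drop the final reference once the map is empty and no other
 * user remains.
 */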
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

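/*
 * Default no-op implementations of the AUX area tracing hooks; they are
 * declared __weak so that the real implementations override them when
 * auxtrace support is built in.
 */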
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last reference put will be done at perf_evlist__mmap_consume(),
	 * so that we make sure we don't prevent tools from consuming every
	 * last event in the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
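	/*
	 * The second of the two references taken below is the one dropped by
	 * perf_mmap__consume() once only it remains and the buffer is empty.
	 */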
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

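/*
 * Find the range of valid records in a full overwrite (backward) ring
 * buffer: starting from 'head', walk the event headers forward until either
 * a zero-sized header (unused space) is hit or the whole buffer has been
 * traversed, and report that position as 'end'.
 */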
static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

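/*
 * Feed all unread data between md->prev and the current head (direction
 * depends on 'overwrite') to the 'push' callback, splitting the range in
 * two if it wraps around the end of the ring buffer.
 */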
int perf_mmap__push(struct perf_mmap *md, bool overwrite,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	start = overwrite ? head : old;
	end = overwrite ? old : head;

	if (start == end)
		return 0;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return 0;
		}

		/*
		 * The backward ring buffer is full. We still have a chance to
		 * read most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
			return -1;
	}

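	/*
	 * If [start, end) wraps past the end of the buffer, push the chunk up
	 * to the end of the buffer first; the remainder, starting again at
	 * offset 0, is pushed below.
	 */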
	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite);
out:
	return rc;
}