mmap.c: diff from 772c1d06bd402f7ee72c61a18c2db74cd74b6758 (old) to a583053299c1e66e6202b494cbc3acd93cedc4cc (new)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

--- 8 unchanged lines hidden ---

#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include "util.h" /* page_size */

-size_t perf_mmap__mmap_len(struct perf_mmap *map)
+size_t perf_mmap__mmap_len(struct mmap *map)
{
        return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map,
+static union perf_event *perf_mmap__read(struct mmap *map,
                                         u64 *startp, u64 end)
{
        unsigned char *data = map->base + page_size;
        union perf_event *event = NULL;
        int diff = end - *startp;

        if (diff >= (int)sizeof(event->header)) {
                size_t size;

--- 37 unchanged lines hidden ---

 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *      //process the event
 *      perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
-union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+union perf_event *perf_mmap__read_event(struct mmap *map)
{
        union perf_event *event;

        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
         */
        if (!refcount_read(&map->refcnt))
                return NULL;

--- 5 unchanged lines hidden ---

        event = perf_mmap__read(map, &map->start, map->end);

        if (!map->overwrite)
                map->prev = map->start;

        return event;
}

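The usage comment above spells out the intended calling sequence for these readers. Expanded into a self-contained sketch it could look like the function below; drain_ring_buffer() and process_event() are hypothetical names, only the perf_mmap__* calls and struct mmap come from this file.

/* Hedged sketch of the documented read loop; process_event() stands in
 * for whatever the caller does with each record.
 */
static void drain_ring_buffer(struct mmap *map)
{
        union perf_event *event;

        if (perf_mmap__read_init(map) < 0)
                return;                         /* unmapped, or no new data */

        while ((event = perf_mmap__read_event(map)) != NULL) {
                process_event(event);           /* hypothetical consumer */
                perf_mmap__consume(map);        /* let the kernel reuse the space */
        }

        perf_mmap__read_done(map);              /* required in overwrite mode */
}
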
-static bool perf_mmap__empty(struct perf_mmap *map)
+static bool perf_mmap__empty(struct mmap *map)
{
        return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

-void perf_mmap__get(struct perf_mmap *map)
+void perf_mmap__get(struct mmap *map)
{
        refcount_inc(&map->refcnt);
}

-void perf_mmap__put(struct perf_mmap *map)
+void perf_mmap__put(struct mmap *map)
{
        BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

        if (refcount_dec_and_test(&map->refcnt))
                perf_mmap__munmap(map);
}

-void perf_mmap__consume(struct perf_mmap *map)
+void perf_mmap__consume(struct mmap *map)
{
        if (!map->overwrite) {
                u64 old = map->prev;

                perf_mmap__write_tail(map, old);
        }

        if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))

--- 22 unchanged lines hidden ---

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
                                          struct evlist *evlist __maybe_unused,
                                          int idx __maybe_unused,
                                          bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
-static int perf_mmap__aio_enabled(struct perf_mmap *map)
+static int perf_mmap__aio_enabled(struct mmap *map)
{
        return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
        map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
                                  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
        if (map->aio.data[idx] == MAP_FAILED) {
                map->aio.data[idx] = NULL;
                return -1;
        }

        return 0;
}

-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
        if (map->aio.data[idx]) {
                munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
                map->aio.data[idx] = NULL;
        }
}

-static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
        void *data;
        size_t mmap_len;
        unsigned long node_mask;

        if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
                data = map->aio.data[idx];
                mmap_len = perf_mmap__mmap_len(map);
                node_mask = 1UL << cpu__get_node(cpu);
                if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
                        pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
                               data, data + mmap_len, cpu__get_node(cpu));
                        return -1;
                }
        }

        return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
        map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
        if (map->aio.data[idx] == NULL)
                return -1;

        return 0;
}

-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
        zfree(&(map->aio.data[idx]));
}

-static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
                               int cpu __maybe_unused, int affinity __maybe_unused)
{
        return 0;
}
#endif

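For reference, the NUMA branch of perf_mmap__aio_bind() above pins each AIO buffer to the node of its CPU with mbind(). A minimal standalone sketch of that pattern follows; the bind_to_node() helper is hypothetical, and maxnode is passed as the full bit width of the mask.

#include <numaif.h>     /* mbind(), MPOL_BIND; link with -lnuma */

/* Hypothetical helper: allocate the pages of [buf, buf + len) only on `node`. */
static int bind_to_node(void *buf, size_t len, int node)
{
        unsigned long node_mask = 1UL << node;  /* one bit per NUMA node */

        /* maxnode tells the kernel how many bits of node_mask are valid */
        return mbind(buf, len, MPOL_BIND, &node_mask,
                     sizeof(node_mask) * 8, 0) ? -1 : 0;
}
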
-static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
+static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
        int delta_max, i, prio, ret;

        map->aio.nr_cblocks = mp->nr_cblocks;
        if (map->aio.nr_cblocks) {
                map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
                if (!map->aio.aiocb) {
                        pr_debug2("failed to allocate aiocb for data buffer, error %m\n");

--- 37 unchanged lines hidden ---

                        prio = delta_max - i;
                        map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
                }
        }

        return 0;
}

-static void perf_mmap__aio_munmap(struct perf_mmap *map)
+static void perf_mmap__aio_munmap(struct mmap *map)
{
        int i;

        for (i = 0; i < map->aio.nr_cblocks; ++i)
                perf_mmap__aio_free(map, i);
        if (map->aio.data)
                zfree(&map->aio.data);
        zfree(&map->aio.cblocks);
        zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
-static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
+static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
        return 0;
}

-static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
+static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
                               struct mmap_params *mp __maybe_unused)
{
        return 0;
}

-static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
+static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

-void perf_mmap__munmap(struct perf_mmap *map)
+void perf_mmap__munmap(struct mmap *map)
{
        perf_mmap__aio_munmap(map);
        if (map->data != NULL) {
                munmap(map->data, perf_mmap__mmap_len(map));
                map->data = NULL;
        }
        if (map->base != NULL) {
                munmap(map->base, perf_mmap__mmap_len(map));

--- 16 unchanged lines hidden ---

        nr_cpus = perf_cpu_map__nr(cpu_map);
        for (c = 0; c < nr_cpus; c++) {
                cpu = cpu_map->map[c]; /* map c index to online cpu index */
                if (cpu__get_node(cpu) == node)
                        CPU_SET(cpu, mask);
        }
}

-static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
        CPU_ZERO(&map->affinity_mask);
        if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
                build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
        else if (mp->affinity == PERF_AFFINITY_CPU)
                CPU_SET(map->cpu, &map->affinity_mask);
}

-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
+int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
        /*
         * The last one will be done at perf_mmap__consume(), so that we
         * make sure we don't prevent tools from consuming every last event in
         * the ring buffer.
         *
         * I.e. we can get the POLLHUP meaning that the fd doesn't exist
         * anymore, but the last events for it are still in the ring buffer,

--- 71 unchanged lines hidden ---

        }
        WARN_ONCE(1, "Shouldn't get here\n");
        return -1;
}

/*
 * Report the start and end of the available data in the ring buffer
 */
-static int __perf_mmap__read_init(struct perf_mmap *md)
+static int __perf_mmap__read_init(struct mmap *md)
{
        u64 head = perf_mmap__read_head(md);
        u64 old = md->prev;
        unsigned char *data = md->base + page_size;
        unsigned long size;

        md->start = md->overwrite ? head : old;
        md->end = md->overwrite ? old : head;

--- 17 unchanged lines hidden ---

                 */
                if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
                        return -EINVAL;
        }

        return 0;
}

-int perf_mmap__read_init(struct perf_mmap *map)
+int perf_mmap__read_init(struct mmap *map)
{
        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
         */
        if (!refcount_read(&map->refcnt))
                return -ENOENT;

        return __perf_mmap__read_init(map);
}

-int perf_mmap__push(struct perf_mmap *md, void *to,
-                    int push(struct perf_mmap *map, void *to, void *buf, size_t size))
+int perf_mmap__push(struct mmap *md, void *to,
+                    int push(struct mmap *map, void *to, void *buf, size_t size))
{
        u64 head = perf_mmap__read_head(md);
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;
        int rc = 0;

        rc = perf_mmap__read_init(md);

--- 29 unchanged lines hidden ---

}

/*
 * Mandatory for overwrite mode
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct the map->prev to head which is the end of next read.
 */
-void perf_mmap__read_done(struct perf_mmap *map)
+void perf_mmap__read_done(struct mmap *map)
{
        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
         */
        if (!refcount_read(&map->refcnt))
                return;

        map->prev = perf_mmap__read_head(map);
}
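
To make the perf_mmap__push() contract concrete, here is a hedged sketch of a push callback that drains ring-buffer chunks into a file descriptor. struct sink and push_to_file() are hypothetical; only the callback signature comes from the declaration above. Note that perf_mmap__push() can invoke the callback twice per drain when the available data wraps around the end of the buffer.

#include <unistd.h>     /* write() */

/* Hypothetical sink handed to perf_mmap__push() through its `to` argument. */
struct sink {
        int fd;
};

/* Matches: int push(struct mmap *map, void *to, void *buf, size_t size) */
static int push_to_file(struct mmap *map __maybe_unused, void *to,
                        void *buf, size_t size)
{
        struct sink *s = to;

        while (size) {
                ssize_t n = write(s->fd, buf, size);

                if (n <= 0)
                        return -1;      /* error is propagated by perf_mmap__push() */
                buf = (char *)buf + n;
                size -= n;
        }

        return 0;
}

A caller would then drain one map with something like perf_mmap__push(map, &sink, push_to_file).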