// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

/* Weak stubs, overridden when AUX area tracing support is built in. */
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
			       data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif
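/*
 * Illustrative sketch (not part of this file): the mbind() pattern used by
 * perf_mmap__aio_bind() above, reduced to plain mmap(2)/mbind(2) calls.
 * 'len' and 'node' are assumed inputs; error handling is elided.
 *
 *	#include <sys/mman.h>
 *	#include <numaif.h>
 *
 *	unsigned long node_mask = 1UL << node;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED &&
 *	    mbind(buf, len, MPOL_BIND, &node_mask, sizeof(node_mask) * 8, 0) == 0)
 *		// pages of buf will be allocated on 'node' on first touch
 *
 * Binding here matters because the AIO buffers are written by the thread
 * that drains the ring buffer; keeping them on that thread's NUMA node
 * avoids cross-node memory traffic.
 */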
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * A cblock.aio_fildes value different from -1
			 * denotes a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call
			 * is required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept in separate per-prio queues,
			 * so adding a new request iterates through a
			 * shorter per-prio list. Blocks with numbers
			 * higher than _SC_AIO_PRIO_DELTA_MAX go with
			 * priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	/* zfree() handles NULL, no need to check map->aio.data first. */
	zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif
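/*
 * Illustrative sketch (not part of this file): the completion check implied
 * by the aio_fildes == -1 convention above, along the lines of what
 * record__aio_sync() in builtin-record.c has to do before a cblock can be
 * reused. 'cblock' is an assumed in-flight struct aiocb *; only the POSIX
 * AIO API from <aio.h> is used.
 *
 *	if (cblock->aio_fildes != -1) {		// a write was started
 *		while (aio_error(cblock) == EINPROGRESS)
 *			;			// or block in aio_suspend()
 *		if (aio_return(cblock) >= 0)	// fetch the final status
 *			cblock->aio_fildes = -1; // mark the cblock reusable
 *	}
 */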
void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
	}
}

static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);

	return 0;
}

int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		/* Trace compression needs a scratch buffer when AIO buffers are not in use. */
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}
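/*
 * Illustrative sketch (not part of this file): applying an affinity mask
 * such as map->affinity_mask with plain sched_setaffinity(2), roughly what
 * a consumer like perf record does before draining a map. 'mask' is an
 * assumed struct mmap_cpu_mask *; only its bits/nbits members are used.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *	size_t cpu;
 *
 *	CPU_ZERO(&set);
 *	for (cpu = 0; cpu < mask->nbits; cpu++)
 *		if (test_bit(cpu, mask->bits))
 *			CPU_SET(cpu, &set);
 *	sched_setaffinity(0, sizeof(set), &set);	// 0 == calling thread
 */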
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		/* Data wraps around the end of the ring buffer: push the tail chunk first. */
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}
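/*
 * Worked example of the wraparound split in perf_mmap__push() above, with
 * assumed values for a 4 KiB data buffer (mask == 0xfff):
 *
 *	start = 0x1f00, end = 0x2100, so size = 0x200 and
 *	(start & mask) + size == 0xf00 + 0x200 == 0x1100, while
 *	(end & mask) == 0x100 -- the region wraps.
 *
 *	First push:  0x100 bytes at offset 0xf00 (up to the buffer end),
 *	             start becomes 0x2000.
 *	Second push: the remaining 0x100 bytes at offset 0x000.
 */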