/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Return one event per call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite mode doesn't pause the ring buffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
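
/*
 * Illustrative sketch only, not part of the original file: a minimal
 * consumer loop following the Usage pattern documented above. The
 * example_drain_mmap() name and the process() callback are hypothetical;
 * real tools (perf record/top/report) have their own event handling.
 */
static void __maybe_unused example_drain_mmap(struct perf_mmap *map,
					      void (*process)(union perf_event *event))
{
	union perf_event *event;

	/* Nothing to read if the buffer is empty or was already unmapped */
	if (perf_mmap__read_init(map) < 0)
		return;

	while ((event = perf_mmap__read_event(map)) != NULL) {
		process(event);
		perf_mmap__consume(map);
	}

	/* Mandatory for overwrite mode: moves map->prev back to the head */
	perf_mmap__read_done(map);
}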

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = perf_mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
			       data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else
static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
{
	map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so it requires an explicit record__aio_sync()
			 * call before the cblock may be reused again.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls, because
			 * queued requests are kept in separate per-priority
			 * queues and adding a new request iterates through
			 * a shorter per-priority list. Blocks with numbers
			 * higher than _SC_AIO_PRIO_DELTA_MAX go with
			 * priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct perf_mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}

int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
			int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
			off_t *off)
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size, size0 = 0;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	/*
	 * md->base data is copied into the md->aio.data[idx] buffer to
	 * release space in the kernel buffer as fast as possible, through
	 * perf_mmap__consume() below.
	 *
	 * That lets the kernel proceed with storing more profiling data
	 * into the kernel buffer earlier than other per-cpu kernel buffers
	 * are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling
	 * data crosses the upper bound of the kernel buffer. In this case
	 * we first move the part of the data from md->start till the upper
	 * bound and then the remainder from the beginning of the kernel
	 * buffer till the end of the data chunk.
	 */

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;
		memcpy(md->aio.data[idx], buf, size);
		size0 = size;
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;
	memcpy(md->aio.data[idx] + size0, buf, size);

	/*
	 * Increment md->refcnt to guard the md->aio.data[idx] buffer
	 * from premature deallocation, because the md object can be
	 * released earlier than the aio write request started on
	 * md->aio.data[idx] is complete.
	 *
	 * perf_mmap__put() is done at record__aio_complete() after
	 * the started request completes.
	 */
	perf_mmap__get(md);

	md->prev = head;
	perf_mmap__consume(md);

	rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
	if (!rc) {
		*off += size0 + size;
	} else {
		/*
		 * Decrement md->refcnt back if the aio write operation
		 * failed to start.
		 */
		perf_mmap__put(md);
	}

	return rc;
}
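
/*
 * Illustrative sketch only, not part of the original file: a minimal push()
 * callback for perf_mmap__aio_push() that queues an asynchronous write of
 * the copied chunk at the given file offset. The example_aio_pushfn() name
 * and the convention that 'to' carries the output file descriptor are
 * hypothetical; perf record supplies its own callback and waits for
 * completion via record__aio_sync() before reusing the cblock.
 */
static int __maybe_unused example_aio_pushfn(void *to, struct aiocb *cblock,
					     void *buf, size_t size, off_t off)
{
	int trace_fd = *(int *)to;

	cblock->aio_fildes = trace_fd;	/* != -1 marks the cblock as in flight */
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;

	/* aio_write() returns 0 if the request was queued, -1 on error */
	return aio_write(cblock);
}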
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
{
}
#endif

void perf_mmap__munmap(struct perf_mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void build_node_mask(int node, cpu_set_t *mask)
{
	int c, cpu, nr_cpus;
	const struct cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			CPU_SET(cpu, mask);
	}
}

static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
{
	CPU_ZERO(&map->affinity_mask);
	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		CPU_SET(map->cpu, &map->affinity_mask);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;
	map->cpu = cpu;

	perf_mmap__setup_affinity_mask(map, mp);

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}
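
/*
 * Illustrative sketch only, not part of the original file: one way a caller
 * might fill struct mmap_params for a plain read/write mapping of 'pages'
 * data pages (plus the control page added by perf_mmap__mmap_len()) and map
 * a single perf event fd. The example_mmap_one() name is hypothetical and
 * the field list is only what this file itself uses; the evlist code sets
 * up these parameters for real mappings.
 */
static int __maybe_unused example_mmap_one(struct perf_mmap *map, int fd,
					   int cpu, unsigned int pages)
{
	struct mmap_params mp = {
		.prot	    = PROT_READ | PROT_WRITE,
		.mask	    = pages * page_size - 1,	/* size of the data area minus one */
		.nr_cblocks = 0,			/* no AIO staging buffers */
		.affinity   = PERF_AFFINITY_SYS,	/* keep the system-wide affinity */
	};

	return perf_mmap__mmap(map, &mp, fd, cpu);
}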

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if (md->start == md->end)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct map->prev to head, which is the end point of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
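
/*
 * Illustrative sketch only, not part of the original file: a minimal push()
 * callback for perf_mmap__push() above, appending each chunk of ring buffer
 * data to an output file descriptor. The example_pushfn() name and the
 * convention that 'to' carries the fd are hypothetical (perf record passes
 * its own context and callback), and write() is assumed to be reachable
 * through the existing includes.
 */
static int __maybe_unused example_pushfn(struct perf_mmap *map __maybe_unused,
					 void *to, void *buf, size_t size)
{
	int output_fd = *(int *)to;

	while (size) {
		ssize_t written = write(output_fd, buf, size);

		if (written < 0)
			return -1;	/* perf_mmap__push() treats < 0 as failure */

		buf  += written;	/* void * arithmetic, as used elsewhere in this file */
		size -= written;
	}

	return 0;
}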