/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
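
/*
 * A minimal usage sketch (not part of the original file) of the read loop
 * documented above, for the non-overwrite case.  The function name and the
 * process() callback are hypothetical; a real tool would do its event
 * handling where process() is called.
 */
static void __maybe_unused example_read_loop(struct perf_mmap *map,
					     void (*process)(union perf_event *event))
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;

	while ((event = perf_mmap__read_event(map)) != NULL) {
		process(event);
		/* Advance the tail so the kernel can reuse the space. */
		perf_mmap__consume(map);
	}

	perf_mmap__read_done(map);
}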

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
			if (!map->aio.data[i]) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			/*
			 * A cblock.aio_fildes value different from -1
			 * denotes a started aio write operation on the
			 * cblock, so it requires an explicit record__aio_sync()
			 * call before the cblock may be reused again.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas to get
			 * faster aio write system calls, because queued requests
			 * are kept in separate per-priority queues and adding
			 * a new request iterates through a shorter per-priority
			 * list. Blocks with numbers higher than
			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
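
/*
 * A minimal sketch (not part of the original file) of waiting for a started
 * aio write on a cblock before reusing it, along the lines of the
 * record__aio_sync() call mentioned above.  The function name is
 * hypothetical.
 */
static void __maybe_unused example_cblock_sync(struct aiocb *cblock)
{
	const struct aiocb *cblocks[1] = { cblock };

	/* Wait until the in-flight request on this cblock has finished. */
	while (aio_error(cblock) == EINPROGRESS)
		aio_suspend(cblocks, 1, NULL);

	if (aio_return(cblock) < 0)
		pr_debug2("failed aio write, error %m\n");

	/* Mark the cblock as free for the next started write. */
	cblock->aio_fildes = -1;
}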

static void perf_mmap__aio_munmap(struct perf_mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		zfree(&map->aio.data[i]);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}

int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
			int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
			off_t *off)
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size, size0 = 0;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	/*
	 * md->base data is copied into the md->data[idx] buffer to
	 * release space in the kernel buffer as fast as possible,
	 * through perf_mmap__consume() below.
	 *
	 * That lets the kernel proceed with storing more
	 * profiling data into the kernel buffer earlier than other
	 * per-cpu kernel buffers are handled.
	 *
	 * Copying is done in two steps in case the chunk of
	 * profiling data crosses the upper bound of the kernel buffer.
	 * In this case we first move the part of the data from md->start
	 * up to the upper bound and then the remainder from the
	 * beginning of the kernel buffer up to the end of
	 * the data chunk.
	 */

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;
		memcpy(md->aio.data[idx], buf, size);
		size0 = size;
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;
	memcpy(md->aio.data[idx] + size0, buf, size);

	/*
	 * Increment md->refcount to guard the md->data[idx] buffer
	 * from premature deallocation, because the md object can be
	 * released earlier than the aio write request started
	 * on mmap->data[idx] is complete.
	 *
	 * perf_mmap__put() is done at record__aio_complete()
	 * after the started request completes.
	 */
	perf_mmap__get(md);

	md->prev = head;
	perf_mmap__consume(md);

	rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
	if (!rc) {
		*off += size0 + size;
	} else {
		/*
		 * Decrement md->refcount back if the aio write
		 * operation failed to start.
		 */
		perf_mmap__put(md);
	}

	return rc;
}
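
/*
 * A minimal sketch (not part of the original file) of a push() callback for
 * perf_mmap__aio_push() above: it queues an asynchronous write of the copied
 * chunk at the given file offset.  The function name and the use of 'to' as a
 * pointer to an output file descriptor are hypothetical; a real tool's
 * callback would also track the request for later completion handling and
 * would typically set cblock->aio_sigevent.sigev_notify to SIGEV_NONE.
 */
static int __maybe_unused example_aio_pushfn(void *to, struct aiocb *cblock,
					     void *buf, size_t size, off_t off)
{
	int fd = *(int *)to;

	cblock->aio_fildes = fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;

	/* Returns 0 if the request was queued, -1 (with errno set) otherwise. */
	return aio_write(cblock);
}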
#else
static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
{
}
#endif

void perf_mmap__munmap(struct perf_mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;
	map->cpu = cpu;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}
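
/*
 * A minimal sketch (not part of the original file) of how a caller might fill
 * in struct mmap_params for perf_mmap__mmap() above.  'nr_data_pages' is the
 * number of data pages (a power of two, not counting the control page); the
 * function name is hypothetical.
 */
static int __maybe_unused example_mmap_one(struct perf_mmap *map, int fd, int cpu,
					   unsigned int nr_data_pages, bool writable)
{
	struct mmap_params mp = {
		/* The mask covers the data area only, hence the -1. */
		.mask = nr_data_pages * page_size - 1,
		.prot = PROT_READ | (writable ? PROT_WRITE : 0),
	};

	return perf_mmap__mmap(map, &mp, fd, cpu);
}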

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ringbuffer
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if (md->start == md->end)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set the tail to map->prev.
 * Need to correct map->prev to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
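
/*
 * A minimal sketch (not part of the original file) of a push() callback for
 * perf_mmap__push() above, writing each chunk straight to an output file
 * descriptor.  The function name and the use of 'to' as a pointer to that
 * file descriptor are hypothetical.
 */
static int __maybe_unused example_pushfn(struct perf_mmap *map __maybe_unused,
					 void *to, void *buf, size_t size)
{
	int fd = *(int *)to;

	/* Loop until the whole chunk has been written out. */
	while (size) {
		ssize_t ret = write(fd, buf, size);

		if (ret < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;
			return -1;
		}
		buf  += ret;
		size -= ret;
	}

	return 0;
}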