#ifndef __PERF_MMAP_H
#define __PERF_MMAP_H 1

#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/ring_buffer.h>
#include <stdbool.h>
#include <pthread.h> // for cpu_set_t
#ifdef HAVE_AIO_SUPPORT
#include <aio.h>
#endif
#include "auxtrace.h"
#include "event.h"

struct aiocb;
/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
	void		*base;		/* mmap'ed area: user page + data pages */
	int		mask;		/* data size - 1, used to wrap ring offsets */
	int		fd;		/* perf_event fd backing this mmap */
	int		cpu;		/* cpu this mmap was opened on */
	refcount_t	refcnt;
	u64		prev;		/* position where the previous read pass stopped */
	u64		start;		/* readable window, set by perf_mmap__read_init() */
	u64		end;
	bool		overwrite;	/* backward/overwrite mode */
	struct auxtrace_mmap auxtrace_mmap;
	char		event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); /* staging for events that wrap the ring */
#ifdef HAVE_AIO_SUPPORT
	struct {
		void		**data;		/* per-request output buffers */
		struct aiocb	*cblocks;	/* AIO control blocks */
		struct aiocb	**aiocb;	/* scratch list for completion polling */
		int		nr_cblocks;
	} aio;
#endif
	cpu_set_t	affinity_mask;	/* CPUs the reading thread binds to */
	u64		flush;		/* min bytes accumulated before pushing */
	void		*data;		/* compressed-output buffer (comp_level > 0) */
	int		comp_level;	/* trace compression level */
};

/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                      V
 *  NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^       ^              |   ^    |
 *                     |        |__(forbid)____/   |___(forbid)___/|
 *                     |                                           |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};

struct mmap_params {
	int prot, mask, nr_cblocks, affinity, flush, comp_level;
	struct auxtrace_mmap_params auxtrace_mp;
};

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
void perf_mmap__munmap(struct perf_mmap *map);

void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);

void perf_mmap__consume(struct perf_mmap *map);

static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	return ring_buffer_read_head(mm->base);
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *map);

union perf_event *perf_mmap__read_event(struct perf_mmap *map);

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size));

size_t perf_mmap__mmap_len(struct perf_mmap *map);

int perf_mmap__read_init(struct perf_mmap *md);
void perf_mmap__read_done(struct perf_mmap *map);
#endif /* __PERF_MMAP_H */
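
/*
 * Example (editor's sketch, not part of the upstream header): filling
 * struct mmap_params for perf_mmap__mmap(). The concrete values are
 * assumptions made for the sketch: a 512 KiB data area (the size must be
 * a power of two, mask = size - 1), a writable mapping so the tail can be
 * advanced (non-overwrite mode), and AIO/compression left disabled.
 */
#if 0	/* illustrative only, kept out of compilation */
#include <sys/mman.h>

static int example_map_one(struct perf_mmap *map, int ev_fd, int cpu)
{
	struct mmap_params mp = {
		.prot       = PROT_READ | PROT_WRITE, /* a read-only mapping would mean overwrite mode */
		.mask       = (512 * 1024) - 1,
		.nr_cblocks = 0, /* no AIO requests */
		.affinity   = 0, /* default affinity handling, assumed for the sketch */
		.flush      = 1, /* push as soon as any data is available */
		.comp_level = 0, /* no compression */
	};

	return perf_mmap__mmap(map, &mp, ev_fd, cpu);
}
#endif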
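
/*
 * Example (editor's sketch, not part of the upstream header): the intended
 * read cycle. perf_mmap__read_init() snapshots the readable window into
 * ->start/->end, perf_mmap__read_event() walks it one event at a time,
 * perf_mmap__consume() releases space back to the kernel, and
 * perf_mmap__read_done() records where reading stopped in ->prev.
 * handle_event() is a hypothetical callback, not a perf function.
 */
#if 0	/* illustrative only, kept out of compilation */
static void example_drain(struct perf_mmap *map,
			  void (*handle_event)(union perf_event *event))
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return; /* nothing to read (e.g. -EAGAIN) */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		handle_event(event);
		perf_mmap__consume(map);
	}

	perf_mmap__read_done(map);
}
#endif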
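
/*
 * Example (editor's sketch, not part of the upstream header):
 * perf_mmap__push() hands the available data to the caller's push()
 * callback, in up to two chunks when the ring wraps. Below, 'to' is
 * assumed to point at a plain output fd; real callers pass their own
 * context object and cast it back inside the callback.
 */
#if 0	/* illustrative only, kept out of compilation */
#include <unistd.h>

static int example_push(struct perf_mmap *map __maybe_unused,
			void *to, void *buf, size_t size)
{
	int fd = *(int *)to;

	/* Treat a short write as an error in this sketch. */
	return write(fd, buf, size) == (ssize_t)size ? 0 : -1;
}

/* usage: perf_mmap__push(map, &output_fd, example_push); */
#endif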
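
/*
 * Example (editor's sketch, not part of the upstream header): the
 * bkw_mmap_state diagram above expressed as a transition check. perf
 * itself drives these transitions from evlist code when pausing and
 * resuming the backward ring buffers; this helper is purely illustrative.
 */
#if 0	/* illustrative only, kept out of compilation */
static bool bkw_transition_is_legal(enum bkw_mmap_state from,
				    enum bkw_mmap_state to)
{
	switch (from) {
	case BKW_MMAP_NOTREADY:
		return to == BKW_MMAP_RUNNING;      /* (0): setup          */
	case BKW_MMAP_RUNNING:
		return to == BKW_MMAP_DATA_PENDING; /* (1): pause for read */
	case BKW_MMAP_DATA_PENDING:
		return to == BKW_MMAP_EMPTY;        /* (2): read           */
	case BKW_MMAP_EMPTY:
		return to == BKW_MMAP_RUNNING;      /* (3): resume         */
	default:
		return false;                       /* every other move is (forbid) */
	}
}
#endif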