/*
 * Test backward bit in event attribute, read ring buffer from end to
 * beginning
 */

#include "perf.h"
#include "evlist.h"
#include <sys/prctl.h>
#include "tests.h"
#include "debug.h"
#include <errno.h>

#define NR_ITERS 111

/*
 * Rename the current process NR_ITERS times; each prctl() call hits the
 * sys_enter_prctl tracepoint and also generates a PERF_RECORD_COMM event.
 */
static void testcase(void)
{
	int i;

	for (i = 0; i < NR_ITERS; i++) {
		char proc_name[10];

		snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
		prctl(PR_SET_NAME, proc_name);
	}
}

/* Read each backward ring buffer from end to beginning and tally events */
static int count_samples(struct perf_evlist *evlist, int *sample_count,
			 int *comm_count)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		perf_mmap__read_catchup(&evlist->backward_mmap[i]);
		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
			const u32 type = event->header.type;

			switch (type) {
			case PERF_RECORD_SAMPLE:
				(*sample_count)++;
				break;
			case PERF_RECORD_COMM:
				(*comm_count)++;
				break;
			default:
				pr_err("Unexpected record of type %d\n", type);
				return TEST_FAIL;
			}
		}
	}
	return TEST_OK;
}

static int do_test(struct perf_evlist *evlist, int mmap_pages,
		   int *sample_count, int *comm_count)
{
	int err;
	char sbuf[STRERR_BUFSIZE];

	err = perf_evlist__mmap(evlist, mmap_pages, true);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		return TEST_FAIL;
	}

	perf_evlist__enable(evlist);
	testcase();
	perf_evlist__disable(evlist);

	err = count_samples(evlist, sample_count, comm_count);
	perf_evlist__munmap(evlist);
	return err;
}

int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
	char pid[16], sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	struct perf_evsel *evsel __maybe_unused;
	struct parse_events_error parse_error;
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq		  = 0,
		.mmap_pages	  = 256,
		.default_interval = 1,
	};

	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	bzero(&parse_error, sizeof(parse_error));
	/*
	 * Set backward bit, ring buffer should be writing from end. Record
	 * it in aux evlist
	 */
	err = parse_events(evlist, "syscalls:sys_enter_prctl/overwrite/", &parse_error);
	if (err) {
		pr_debug("Failed to parse tracepoint event, try use root\n");
		ret = TEST_SKIP;
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	ret = TEST_FAIL;
	err = do_test(evlist, opts.mmap_pages, &sample_count,
		      &comm_count);
	if (err != TEST_OK)
		goto out_delete_evlist;

	if ((sample_count != NR_ITERS) || (comm_count != NR_ITERS)) {
		pr_err("Unexpected counter: sample_count=%d, comm_count=%d\n",
		       sample_count, comm_count);
		goto out_delete_evlist;
	}

	/* Run again with a single-page ring buffer so old records get overwritten */
	err = do_test(evlist, 1, &sample_count, &comm_count);
	if (err != TEST_OK)
		goto out_delete_evlist;

	ret = TEST_OK;
out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}