#include <sched.h>
#include "evlist.h"
#include "evsel.h"
#include "perf.h"
#include "debug.h"
#include "tests.h"

static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}

int test__PERF_RECORD(void)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.freq	      = 10,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new_default();
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_evlist__mmap_consume(evlist, i);
			}
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}