1*945aea22SJiri Olsa /* 2*945aea22SJiri Olsa * builtin-test.c 3*945aea22SJiri Olsa * 4*945aea22SJiri Olsa * Builtin regression testing command: ever growing number of sanity tests 5*945aea22SJiri Olsa */ 6*945aea22SJiri Olsa #include "builtin.h" 7*945aea22SJiri Olsa 8*945aea22SJiri Olsa #include "util/cache.h" 9*945aea22SJiri Olsa #include "util/color.h" 10*945aea22SJiri Olsa #include "util/debug.h" 11*945aea22SJiri Olsa #include "util/debugfs.h" 12*945aea22SJiri Olsa #include "util/evlist.h" 13*945aea22SJiri Olsa #include "util/parse-options.h" 14*945aea22SJiri Olsa #include "util/parse-events.h" 15*945aea22SJiri Olsa #include "util/symbol.h" 16*945aea22SJiri Olsa #include "util/thread_map.h" 17*945aea22SJiri Olsa #include "util/pmu.h" 18*945aea22SJiri Olsa #include "event-parse.h" 19*945aea22SJiri Olsa #include "../../include/linux/hw_breakpoint.h" 20*945aea22SJiri Olsa 21*945aea22SJiri Olsa #include <sys/mman.h> 22*945aea22SJiri Olsa 23*945aea22SJiri Olsa static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, 24*945aea22SJiri Olsa struct symbol *sym) 25*945aea22SJiri Olsa { 26*945aea22SJiri Olsa bool *visited = symbol__priv(sym); 27*945aea22SJiri Olsa *visited = true; 28*945aea22SJiri Olsa return 0; 29*945aea22SJiri Olsa } 30*945aea22SJiri Olsa 31*945aea22SJiri Olsa static int test__vmlinux_matches_kallsyms(void) 32*945aea22SJiri Olsa { 33*945aea22SJiri Olsa int err = -1; 34*945aea22SJiri Olsa struct rb_node *nd; 35*945aea22SJiri Olsa struct symbol *sym; 36*945aea22SJiri Olsa struct map *kallsyms_map, *vmlinux_map; 37*945aea22SJiri Olsa struct machine kallsyms, vmlinux; 38*945aea22SJiri Olsa enum map_type type = MAP__FUNCTION; 39*945aea22SJiri Olsa struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; 40*945aea22SJiri Olsa 41*945aea22SJiri Olsa /* 42*945aea22SJiri Olsa * Step 1: 43*945aea22SJiri Olsa * 44*945aea22SJiri Olsa * Init the machines that will hold kernel, modules obtained from 45*945aea22SJiri Olsa * both vmlinux + .ko files and from /proc/kallsyms split by modules. 46*945aea22SJiri Olsa */ 47*945aea22SJiri Olsa machine__init(&kallsyms, "", HOST_KERNEL_ID); 48*945aea22SJiri Olsa machine__init(&vmlinux, "", HOST_KERNEL_ID); 49*945aea22SJiri Olsa 50*945aea22SJiri Olsa /* 51*945aea22SJiri Olsa * Step 2: 52*945aea22SJiri Olsa * 53*945aea22SJiri Olsa * Create the kernel maps for kallsyms and the DSO where we will then 54*945aea22SJiri Olsa * load /proc/kallsyms. Also create the modules maps from /proc/modules 55*945aea22SJiri Olsa * and find the .ko files that match them in /lib/modules/`uname -r`/. 56*945aea22SJiri Olsa */ 57*945aea22SJiri Olsa if (machine__create_kernel_maps(&kallsyms) < 0) { 58*945aea22SJiri Olsa pr_debug("machine__create_kernel_maps "); 59*945aea22SJiri Olsa return -1; 60*945aea22SJiri Olsa } 61*945aea22SJiri Olsa 62*945aea22SJiri Olsa /* 63*945aea22SJiri Olsa * Step 3: 64*945aea22SJiri Olsa * 65*945aea22SJiri Olsa * Load and split /proc/kallsyms into multiple maps, one per module. 66*945aea22SJiri Olsa */ 67*945aea22SJiri Olsa if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) { 68*945aea22SJiri Olsa pr_debug("dso__load_kallsyms "); 69*945aea22SJiri Olsa goto out; 70*945aea22SJiri Olsa } 71*945aea22SJiri Olsa 72*945aea22SJiri Olsa /* 73*945aea22SJiri Olsa * Step 4: 74*945aea22SJiri Olsa * 75*945aea22SJiri Olsa * kallsyms will be internally on demand sorted by name so that we can 76*945aea22SJiri Olsa * find the reference relocation * symbol, i.e. the symbol we will use 77*945aea22SJiri Olsa * to see if the running kernel was relocated by checking if it has the 78*945aea22SJiri Olsa * same value in the vmlinux file we load. 79*945aea22SJiri Olsa */ 80*945aea22SJiri Olsa kallsyms_map = machine__kernel_map(&kallsyms, type); 81*945aea22SJiri Olsa 82*945aea22SJiri Olsa sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL); 83*945aea22SJiri Olsa if (sym == NULL) { 84*945aea22SJiri Olsa pr_debug("dso__find_symbol_by_name "); 85*945aea22SJiri Olsa goto out; 86*945aea22SJiri Olsa } 87*945aea22SJiri Olsa 88*945aea22SJiri Olsa ref_reloc_sym.addr = sym->start; 89*945aea22SJiri Olsa 90*945aea22SJiri Olsa /* 91*945aea22SJiri Olsa * Step 5: 92*945aea22SJiri Olsa * 93*945aea22SJiri Olsa * Now repeat step 2, this time for the vmlinux file we'll auto-locate. 94*945aea22SJiri Olsa */ 95*945aea22SJiri Olsa if (machine__create_kernel_maps(&vmlinux) < 0) { 96*945aea22SJiri Olsa pr_debug("machine__create_kernel_maps "); 97*945aea22SJiri Olsa goto out; 98*945aea22SJiri Olsa } 99*945aea22SJiri Olsa 100*945aea22SJiri Olsa vmlinux_map = machine__kernel_map(&vmlinux, type); 101*945aea22SJiri Olsa map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym; 102*945aea22SJiri Olsa 103*945aea22SJiri Olsa /* 104*945aea22SJiri Olsa * Step 6: 105*945aea22SJiri Olsa * 106*945aea22SJiri Olsa * Locate a vmlinux file in the vmlinux path that has a buildid that 107*945aea22SJiri Olsa * matches the one of the running kernel. 108*945aea22SJiri Olsa * 109*945aea22SJiri Olsa * While doing that look if we find the ref reloc symbol, if we find it 110*945aea22SJiri Olsa * we'll have its ref_reloc_symbol.unrelocated_addr and then 111*945aea22SJiri Olsa * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines 112*945aea22SJiri Olsa * to fixup the symbols. 113*945aea22SJiri Olsa */ 114*945aea22SJiri Olsa if (machine__load_vmlinux_path(&vmlinux, type, 115*945aea22SJiri Olsa vmlinux_matches_kallsyms_filter) <= 0) { 116*945aea22SJiri Olsa pr_debug("machine__load_vmlinux_path "); 117*945aea22SJiri Olsa goto out; 118*945aea22SJiri Olsa } 119*945aea22SJiri Olsa 120*945aea22SJiri Olsa err = 0; 121*945aea22SJiri Olsa /* 122*945aea22SJiri Olsa * Step 7: 123*945aea22SJiri Olsa * 124*945aea22SJiri Olsa * Now look at the symbols in the vmlinux DSO and check if we find all of them 125*945aea22SJiri Olsa * in the kallsyms dso. For the ones that are in both, check its names and 126*945aea22SJiri Olsa * end addresses too. 127*945aea22SJiri Olsa */ 128*945aea22SJiri Olsa for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { 129*945aea22SJiri Olsa struct symbol *pair, *first_pair; 130*945aea22SJiri Olsa bool backwards = true; 131*945aea22SJiri Olsa 132*945aea22SJiri Olsa sym = rb_entry(nd, struct symbol, rb_node); 133*945aea22SJiri Olsa 134*945aea22SJiri Olsa if (sym->start == sym->end) 135*945aea22SJiri Olsa continue; 136*945aea22SJiri Olsa 137*945aea22SJiri Olsa first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL); 138*945aea22SJiri Olsa pair = first_pair; 139*945aea22SJiri Olsa 140*945aea22SJiri Olsa if (pair && pair->start == sym->start) { 141*945aea22SJiri Olsa next_pair: 142*945aea22SJiri Olsa if (strcmp(sym->name, pair->name) == 0) { 143*945aea22SJiri Olsa /* 144*945aea22SJiri Olsa * kallsyms don't have the symbol end, so we 145*945aea22SJiri Olsa * set that by using the next symbol start - 1, 146*945aea22SJiri Olsa * in some cases we get this up to a page 147*945aea22SJiri Olsa * wrong, trace_kmalloc when I was developing 148*945aea22SJiri Olsa * this code was one such example, 2106 bytes 149*945aea22SJiri Olsa * off the real size. More than that and we 150*945aea22SJiri Olsa * _really_ have a problem. 151*945aea22SJiri Olsa */ 152*945aea22SJiri Olsa s64 skew = sym->end - pair->end; 153*945aea22SJiri Olsa if (llabs(skew) < page_size) 154*945aea22SJiri Olsa continue; 155*945aea22SJiri Olsa 156*945aea22SJiri Olsa pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", 157*945aea22SJiri Olsa sym->start, sym->name, sym->end, pair->end); 158*945aea22SJiri Olsa } else { 159*945aea22SJiri Olsa struct rb_node *nnd; 160*945aea22SJiri Olsa detour: 161*945aea22SJiri Olsa nnd = backwards ? rb_prev(&pair->rb_node) : 162*945aea22SJiri Olsa rb_next(&pair->rb_node); 163*945aea22SJiri Olsa if (nnd) { 164*945aea22SJiri Olsa struct symbol *next = rb_entry(nnd, struct symbol, rb_node); 165*945aea22SJiri Olsa 166*945aea22SJiri Olsa if (next->start == sym->start) { 167*945aea22SJiri Olsa pair = next; 168*945aea22SJiri Olsa goto next_pair; 169*945aea22SJiri Olsa } 170*945aea22SJiri Olsa } 171*945aea22SJiri Olsa 172*945aea22SJiri Olsa if (backwards) { 173*945aea22SJiri Olsa backwards = false; 174*945aea22SJiri Olsa pair = first_pair; 175*945aea22SJiri Olsa goto detour; 176*945aea22SJiri Olsa } 177*945aea22SJiri Olsa 178*945aea22SJiri Olsa pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", 179*945aea22SJiri Olsa sym->start, sym->name, pair->name); 180*945aea22SJiri Olsa } 181*945aea22SJiri Olsa } else 182*945aea22SJiri Olsa pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name); 183*945aea22SJiri Olsa 184*945aea22SJiri Olsa err = -1; 185*945aea22SJiri Olsa } 186*945aea22SJiri Olsa 187*945aea22SJiri Olsa if (!verbose) 188*945aea22SJiri Olsa goto out; 189*945aea22SJiri Olsa 190*945aea22SJiri Olsa pr_info("Maps only in vmlinux:\n"); 191*945aea22SJiri Olsa 192*945aea22SJiri Olsa for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { 193*945aea22SJiri Olsa struct map *pos = rb_entry(nd, struct map, rb_node), *pair; 194*945aea22SJiri Olsa /* 195*945aea22SJiri Olsa * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while 196*945aea22SJiri Olsa * the kernel will have the path for the vmlinux file being used, 197*945aea22SJiri Olsa * so use the short name, less descriptive but the same ("[kernel]" in 198*945aea22SJiri Olsa * both cases. 199*945aea22SJiri Olsa */ 200*945aea22SJiri Olsa pair = map_groups__find_by_name(&kallsyms.kmaps, type, 201*945aea22SJiri Olsa (pos->dso->kernel ? 202*945aea22SJiri Olsa pos->dso->short_name : 203*945aea22SJiri Olsa pos->dso->name)); 204*945aea22SJiri Olsa if (pair) 205*945aea22SJiri Olsa pair->priv = 1; 206*945aea22SJiri Olsa else 207*945aea22SJiri Olsa map__fprintf(pos, stderr); 208*945aea22SJiri Olsa } 209*945aea22SJiri Olsa 210*945aea22SJiri Olsa pr_info("Maps in vmlinux with a different name in kallsyms:\n"); 211*945aea22SJiri Olsa 212*945aea22SJiri Olsa for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { 213*945aea22SJiri Olsa struct map *pos = rb_entry(nd, struct map, rb_node), *pair; 214*945aea22SJiri Olsa 215*945aea22SJiri Olsa pair = map_groups__find(&kallsyms.kmaps, type, pos->start); 216*945aea22SJiri Olsa if (pair == NULL || pair->priv) 217*945aea22SJiri Olsa continue; 218*945aea22SJiri Olsa 219*945aea22SJiri Olsa if (pair->start == pos->start) { 220*945aea22SJiri Olsa pair->priv = 1; 221*945aea22SJiri Olsa pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", 222*945aea22SJiri Olsa pos->start, pos->end, pos->pgoff, pos->dso->name); 223*945aea22SJiri Olsa if (pos->pgoff != pair->pgoff || pos->end != pair->end) 224*945aea22SJiri Olsa pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", 225*945aea22SJiri Olsa pair->start, pair->end, pair->pgoff); 226*945aea22SJiri Olsa pr_info(" %s\n", pair->dso->name); 227*945aea22SJiri Olsa pair->priv = 1; 228*945aea22SJiri Olsa } 229*945aea22SJiri Olsa } 230*945aea22SJiri Olsa 231*945aea22SJiri Olsa pr_info("Maps only in kallsyms:\n"); 232*945aea22SJiri Olsa 233*945aea22SJiri Olsa for (nd = rb_first(&kallsyms.kmaps.maps[type]); 234*945aea22SJiri Olsa nd; nd = rb_next(nd)) { 235*945aea22SJiri Olsa struct map *pos = rb_entry(nd, struct map, rb_node); 236*945aea22SJiri Olsa 237*945aea22SJiri Olsa if (!pos->priv) 238*945aea22SJiri Olsa map__fprintf(pos, stderr); 239*945aea22SJiri Olsa } 240*945aea22SJiri Olsa out: 241*945aea22SJiri Olsa return err; 242*945aea22SJiri Olsa } 243*945aea22SJiri Olsa 244*945aea22SJiri Olsa #include "util/cpumap.h" 245*945aea22SJiri Olsa #include "util/evsel.h" 246*945aea22SJiri Olsa #include <sys/types.h> 247*945aea22SJiri Olsa 248*945aea22SJiri Olsa static int trace_event__id(const char *evname) 249*945aea22SJiri Olsa { 250*945aea22SJiri Olsa char *filename; 251*945aea22SJiri Olsa int err = -1, fd; 252*945aea22SJiri Olsa 253*945aea22SJiri Olsa if (asprintf(&filename, 254*945aea22SJiri Olsa "%s/syscalls/%s/id", 255*945aea22SJiri Olsa tracing_events_path, evname) < 0) 256*945aea22SJiri Olsa return -1; 257*945aea22SJiri Olsa 258*945aea22SJiri Olsa fd = open(filename, O_RDONLY); 259*945aea22SJiri Olsa if (fd >= 0) { 260*945aea22SJiri Olsa char id[16]; 261*945aea22SJiri Olsa if (read(fd, id, sizeof(id)) > 0) 262*945aea22SJiri Olsa err = atoi(id); 263*945aea22SJiri Olsa close(fd); 264*945aea22SJiri Olsa } 265*945aea22SJiri Olsa 266*945aea22SJiri Olsa free(filename); 267*945aea22SJiri Olsa return err; 268*945aea22SJiri Olsa } 269*945aea22SJiri Olsa 270*945aea22SJiri Olsa static int test__open_syscall_event(void) 271*945aea22SJiri Olsa { 272*945aea22SJiri Olsa int err = -1, fd; 273*945aea22SJiri Olsa struct thread_map *threads; 274*945aea22SJiri Olsa struct perf_evsel *evsel; 275*945aea22SJiri Olsa struct perf_event_attr attr; 276*945aea22SJiri Olsa unsigned int nr_open_calls = 111, i; 277*945aea22SJiri Olsa int id = trace_event__id("sys_enter_open"); 278*945aea22SJiri Olsa 279*945aea22SJiri Olsa if (id < 0) { 280*945aea22SJiri Olsa pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 281*945aea22SJiri Olsa return -1; 282*945aea22SJiri Olsa } 283*945aea22SJiri Olsa 284*945aea22SJiri Olsa threads = thread_map__new(-1, getpid(), UINT_MAX); 285*945aea22SJiri Olsa if (threads == NULL) { 286*945aea22SJiri Olsa pr_debug("thread_map__new\n"); 287*945aea22SJiri Olsa return -1; 288*945aea22SJiri Olsa } 289*945aea22SJiri Olsa 290*945aea22SJiri Olsa memset(&attr, 0, sizeof(attr)); 291*945aea22SJiri Olsa attr.type = PERF_TYPE_TRACEPOINT; 292*945aea22SJiri Olsa attr.config = id; 293*945aea22SJiri Olsa evsel = perf_evsel__new(&attr, 0); 294*945aea22SJiri Olsa if (evsel == NULL) { 295*945aea22SJiri Olsa pr_debug("perf_evsel__new\n"); 296*945aea22SJiri Olsa goto out_thread_map_delete; 297*945aea22SJiri Olsa } 298*945aea22SJiri Olsa 299*945aea22SJiri Olsa if (perf_evsel__open_per_thread(evsel, threads) < 0) { 300*945aea22SJiri Olsa pr_debug("failed to open counter: %s, " 301*945aea22SJiri Olsa "tweak /proc/sys/kernel/perf_event_paranoid?\n", 302*945aea22SJiri Olsa strerror(errno)); 303*945aea22SJiri Olsa goto out_evsel_delete; 304*945aea22SJiri Olsa } 305*945aea22SJiri Olsa 306*945aea22SJiri Olsa for (i = 0; i < nr_open_calls; ++i) { 307*945aea22SJiri Olsa fd = open("/etc/passwd", O_RDONLY); 308*945aea22SJiri Olsa close(fd); 309*945aea22SJiri Olsa } 310*945aea22SJiri Olsa 311*945aea22SJiri Olsa if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { 312*945aea22SJiri Olsa pr_debug("perf_evsel__read_on_cpu\n"); 313*945aea22SJiri Olsa goto out_close_fd; 314*945aea22SJiri Olsa } 315*945aea22SJiri Olsa 316*945aea22SJiri Olsa if (evsel->counts->cpu[0].val != nr_open_calls) { 317*945aea22SJiri Olsa pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", 318*945aea22SJiri Olsa nr_open_calls, evsel->counts->cpu[0].val); 319*945aea22SJiri Olsa goto out_close_fd; 320*945aea22SJiri Olsa } 321*945aea22SJiri Olsa 322*945aea22SJiri Olsa err = 0; 323*945aea22SJiri Olsa out_close_fd: 324*945aea22SJiri Olsa perf_evsel__close_fd(evsel, 1, threads->nr); 325*945aea22SJiri Olsa out_evsel_delete: 326*945aea22SJiri Olsa perf_evsel__delete(evsel); 327*945aea22SJiri Olsa out_thread_map_delete: 328*945aea22SJiri Olsa thread_map__delete(threads); 329*945aea22SJiri Olsa return err; 330*945aea22SJiri Olsa } 331*945aea22SJiri Olsa 332*945aea22SJiri Olsa #include <sched.h> 333*945aea22SJiri Olsa 334*945aea22SJiri Olsa static int test__open_syscall_event_on_all_cpus(void) 335*945aea22SJiri Olsa { 336*945aea22SJiri Olsa int err = -1, fd, cpu; 337*945aea22SJiri Olsa struct thread_map *threads; 338*945aea22SJiri Olsa struct cpu_map *cpus; 339*945aea22SJiri Olsa struct perf_evsel *evsel; 340*945aea22SJiri Olsa struct perf_event_attr attr; 341*945aea22SJiri Olsa unsigned int nr_open_calls = 111, i; 342*945aea22SJiri Olsa cpu_set_t cpu_set; 343*945aea22SJiri Olsa int id = trace_event__id("sys_enter_open"); 344*945aea22SJiri Olsa 345*945aea22SJiri Olsa if (id < 0) { 346*945aea22SJiri Olsa pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 347*945aea22SJiri Olsa return -1; 348*945aea22SJiri Olsa } 349*945aea22SJiri Olsa 350*945aea22SJiri Olsa threads = thread_map__new(-1, getpid(), UINT_MAX); 351*945aea22SJiri Olsa if (threads == NULL) { 352*945aea22SJiri Olsa pr_debug("thread_map__new\n"); 353*945aea22SJiri Olsa return -1; 354*945aea22SJiri Olsa } 355*945aea22SJiri Olsa 356*945aea22SJiri Olsa cpus = cpu_map__new(NULL); 357*945aea22SJiri Olsa if (cpus == NULL) { 358*945aea22SJiri Olsa pr_debug("cpu_map__new\n"); 359*945aea22SJiri Olsa goto out_thread_map_delete; 360*945aea22SJiri Olsa } 361*945aea22SJiri Olsa 362*945aea22SJiri Olsa 363*945aea22SJiri Olsa CPU_ZERO(&cpu_set); 364*945aea22SJiri Olsa 365*945aea22SJiri Olsa memset(&attr, 0, sizeof(attr)); 366*945aea22SJiri Olsa attr.type = PERF_TYPE_TRACEPOINT; 367*945aea22SJiri Olsa attr.config = id; 368*945aea22SJiri Olsa evsel = perf_evsel__new(&attr, 0); 369*945aea22SJiri Olsa if (evsel == NULL) { 370*945aea22SJiri Olsa pr_debug("perf_evsel__new\n"); 371*945aea22SJiri Olsa goto out_thread_map_delete; 372*945aea22SJiri Olsa } 373*945aea22SJiri Olsa 374*945aea22SJiri Olsa if (perf_evsel__open(evsel, cpus, threads) < 0) { 375*945aea22SJiri Olsa pr_debug("failed to open counter: %s, " 376*945aea22SJiri Olsa "tweak /proc/sys/kernel/perf_event_paranoid?\n", 377*945aea22SJiri Olsa strerror(errno)); 378*945aea22SJiri Olsa goto out_evsel_delete; 379*945aea22SJiri Olsa } 380*945aea22SJiri Olsa 381*945aea22SJiri Olsa for (cpu = 0; cpu < cpus->nr; ++cpu) { 382*945aea22SJiri Olsa unsigned int ncalls = nr_open_calls + cpu; 383*945aea22SJiri Olsa /* 384*945aea22SJiri Olsa * XXX eventually lift this restriction in a way that 385*945aea22SJiri Olsa * keeps perf building on older glibc installations 386*945aea22SJiri Olsa * without CPU_ALLOC. 1024 cpus in 2010 still seems 387*945aea22SJiri Olsa * a reasonable upper limit tho :-) 388*945aea22SJiri Olsa */ 389*945aea22SJiri Olsa if (cpus->map[cpu] >= CPU_SETSIZE) { 390*945aea22SJiri Olsa pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); 391*945aea22SJiri Olsa continue; 392*945aea22SJiri Olsa } 393*945aea22SJiri Olsa 394*945aea22SJiri Olsa CPU_SET(cpus->map[cpu], &cpu_set); 395*945aea22SJiri Olsa if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { 396*945aea22SJiri Olsa pr_debug("sched_setaffinity() failed on CPU %d: %s ", 397*945aea22SJiri Olsa cpus->map[cpu], 398*945aea22SJiri Olsa strerror(errno)); 399*945aea22SJiri Olsa goto out_close_fd; 400*945aea22SJiri Olsa } 401*945aea22SJiri Olsa for (i = 0; i < ncalls; ++i) { 402*945aea22SJiri Olsa fd = open("/etc/passwd", O_RDONLY); 403*945aea22SJiri Olsa close(fd); 404*945aea22SJiri Olsa } 405*945aea22SJiri Olsa CPU_CLR(cpus->map[cpu], &cpu_set); 406*945aea22SJiri Olsa } 407*945aea22SJiri Olsa 408*945aea22SJiri Olsa /* 409*945aea22SJiri Olsa * Here we need to explicitely preallocate the counts, as if 410*945aea22SJiri Olsa * we use the auto allocation it will allocate just for 1 cpu, 411*945aea22SJiri Olsa * as we start by cpu 0. 412*945aea22SJiri Olsa */ 413*945aea22SJiri Olsa if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { 414*945aea22SJiri Olsa pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); 415*945aea22SJiri Olsa goto out_close_fd; 416*945aea22SJiri Olsa } 417*945aea22SJiri Olsa 418*945aea22SJiri Olsa err = 0; 419*945aea22SJiri Olsa 420*945aea22SJiri Olsa for (cpu = 0; cpu < cpus->nr; ++cpu) { 421*945aea22SJiri Olsa unsigned int expected; 422*945aea22SJiri Olsa 423*945aea22SJiri Olsa if (cpus->map[cpu] >= CPU_SETSIZE) 424*945aea22SJiri Olsa continue; 425*945aea22SJiri Olsa 426*945aea22SJiri Olsa if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { 427*945aea22SJiri Olsa pr_debug("perf_evsel__read_on_cpu\n"); 428*945aea22SJiri Olsa err = -1; 429*945aea22SJiri Olsa break; 430*945aea22SJiri Olsa } 431*945aea22SJiri Olsa 432*945aea22SJiri Olsa expected = nr_open_calls + cpu; 433*945aea22SJiri Olsa if (evsel->counts->cpu[cpu].val != expected) { 434*945aea22SJiri Olsa pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", 435*945aea22SJiri Olsa expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); 436*945aea22SJiri Olsa err = -1; 437*945aea22SJiri Olsa } 438*945aea22SJiri Olsa } 439*945aea22SJiri Olsa 440*945aea22SJiri Olsa out_close_fd: 441*945aea22SJiri Olsa perf_evsel__close_fd(evsel, 1, threads->nr); 442*945aea22SJiri Olsa out_evsel_delete: 443*945aea22SJiri Olsa perf_evsel__delete(evsel); 444*945aea22SJiri Olsa out_thread_map_delete: 445*945aea22SJiri Olsa thread_map__delete(threads); 446*945aea22SJiri Olsa return err; 447*945aea22SJiri Olsa } 448*945aea22SJiri Olsa 449*945aea22SJiri Olsa /* 450*945aea22SJiri Olsa * This test will generate random numbers of calls to some getpid syscalls, 451*945aea22SJiri Olsa * then establish an mmap for a group of events that are created to monitor 452*945aea22SJiri Olsa * the syscalls. 453*945aea22SJiri Olsa * 454*945aea22SJiri Olsa * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated 455*945aea22SJiri Olsa * sample.id field to map back to its respective perf_evsel instance. 456*945aea22SJiri Olsa * 457*945aea22SJiri Olsa * Then it checks if the number of syscalls reported as perf events by 458*945aea22SJiri Olsa * the kernel corresponds to the number of syscalls made. 459*945aea22SJiri Olsa */ 460*945aea22SJiri Olsa static int test__basic_mmap(void) 461*945aea22SJiri Olsa { 462*945aea22SJiri Olsa int err = -1; 463*945aea22SJiri Olsa union perf_event *event; 464*945aea22SJiri Olsa struct thread_map *threads; 465*945aea22SJiri Olsa struct cpu_map *cpus; 466*945aea22SJiri Olsa struct perf_evlist *evlist; 467*945aea22SJiri Olsa struct perf_event_attr attr = { 468*945aea22SJiri Olsa .type = PERF_TYPE_TRACEPOINT, 469*945aea22SJiri Olsa .read_format = PERF_FORMAT_ID, 470*945aea22SJiri Olsa .sample_type = PERF_SAMPLE_ID, 471*945aea22SJiri Olsa .watermark = 0, 472*945aea22SJiri Olsa }; 473*945aea22SJiri Olsa cpu_set_t cpu_set; 474*945aea22SJiri Olsa const char *syscall_names[] = { "getsid", "getppid", "getpgrp", 475*945aea22SJiri Olsa "getpgid", }; 476*945aea22SJiri Olsa pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, 477*945aea22SJiri Olsa (void*)getpgid }; 478*945aea22SJiri Olsa #define nsyscalls ARRAY_SIZE(syscall_names) 479*945aea22SJiri Olsa int ids[nsyscalls]; 480*945aea22SJiri Olsa unsigned int nr_events[nsyscalls], 481*945aea22SJiri Olsa expected_nr_events[nsyscalls], i, j; 482*945aea22SJiri Olsa struct perf_evsel *evsels[nsyscalls], *evsel; 483*945aea22SJiri Olsa 484*945aea22SJiri Olsa for (i = 0; i < nsyscalls; ++i) { 485*945aea22SJiri Olsa char name[64]; 486*945aea22SJiri Olsa 487*945aea22SJiri Olsa snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 488*945aea22SJiri Olsa ids[i] = trace_event__id(name); 489*945aea22SJiri Olsa if (ids[i] < 0) { 490*945aea22SJiri Olsa pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); 491*945aea22SJiri Olsa return -1; 492*945aea22SJiri Olsa } 493*945aea22SJiri Olsa nr_events[i] = 0; 494*945aea22SJiri Olsa expected_nr_events[i] = random() % 257; 495*945aea22SJiri Olsa } 496*945aea22SJiri Olsa 497*945aea22SJiri Olsa threads = thread_map__new(-1, getpid(), UINT_MAX); 498*945aea22SJiri Olsa if (threads == NULL) { 499*945aea22SJiri Olsa pr_debug("thread_map__new\n"); 500*945aea22SJiri Olsa return -1; 501*945aea22SJiri Olsa } 502*945aea22SJiri Olsa 503*945aea22SJiri Olsa cpus = cpu_map__new(NULL); 504*945aea22SJiri Olsa if (cpus == NULL) { 505*945aea22SJiri Olsa pr_debug("cpu_map__new\n"); 506*945aea22SJiri Olsa goto out_free_threads; 507*945aea22SJiri Olsa } 508*945aea22SJiri Olsa 509*945aea22SJiri Olsa CPU_ZERO(&cpu_set); 510*945aea22SJiri Olsa CPU_SET(cpus->map[0], &cpu_set); 511*945aea22SJiri Olsa sched_setaffinity(0, sizeof(cpu_set), &cpu_set); 512*945aea22SJiri Olsa if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { 513*945aea22SJiri Olsa pr_debug("sched_setaffinity() failed on CPU %d: %s ", 514*945aea22SJiri Olsa cpus->map[0], strerror(errno)); 515*945aea22SJiri Olsa goto out_free_cpus; 516*945aea22SJiri Olsa } 517*945aea22SJiri Olsa 518*945aea22SJiri Olsa evlist = perf_evlist__new(cpus, threads); 519*945aea22SJiri Olsa if (evlist == NULL) { 520*945aea22SJiri Olsa pr_debug("perf_evlist__new\n"); 521*945aea22SJiri Olsa goto out_free_cpus; 522*945aea22SJiri Olsa } 523*945aea22SJiri Olsa 524*945aea22SJiri Olsa /* anonymous union fields, can't be initialized above */ 525*945aea22SJiri Olsa attr.wakeup_events = 1; 526*945aea22SJiri Olsa attr.sample_period = 1; 527*945aea22SJiri Olsa 528*945aea22SJiri Olsa for (i = 0; i < nsyscalls; ++i) { 529*945aea22SJiri Olsa attr.config = ids[i]; 530*945aea22SJiri Olsa evsels[i] = perf_evsel__new(&attr, i); 531*945aea22SJiri Olsa if (evsels[i] == NULL) { 532*945aea22SJiri Olsa pr_debug("perf_evsel__new\n"); 533*945aea22SJiri Olsa goto out_free_evlist; 534*945aea22SJiri Olsa } 535*945aea22SJiri Olsa 536*945aea22SJiri Olsa perf_evlist__add(evlist, evsels[i]); 537*945aea22SJiri Olsa 538*945aea22SJiri Olsa if (perf_evsel__open(evsels[i], cpus, threads) < 0) { 539*945aea22SJiri Olsa pr_debug("failed to open counter: %s, " 540*945aea22SJiri Olsa "tweak /proc/sys/kernel/perf_event_paranoid?\n", 541*945aea22SJiri Olsa strerror(errno)); 542*945aea22SJiri Olsa goto out_close_fd; 543*945aea22SJiri Olsa } 544*945aea22SJiri Olsa } 545*945aea22SJiri Olsa 546*945aea22SJiri Olsa if (perf_evlist__mmap(evlist, 128, true) < 0) { 547*945aea22SJiri Olsa pr_debug("failed to mmap events: %d (%s)\n", errno, 548*945aea22SJiri Olsa strerror(errno)); 549*945aea22SJiri Olsa goto out_close_fd; 550*945aea22SJiri Olsa } 551*945aea22SJiri Olsa 552*945aea22SJiri Olsa for (i = 0; i < nsyscalls; ++i) 553*945aea22SJiri Olsa for (j = 0; j < expected_nr_events[i]; ++j) { 554*945aea22SJiri Olsa int foo = syscalls[i](); 555*945aea22SJiri Olsa ++foo; 556*945aea22SJiri Olsa } 557*945aea22SJiri Olsa 558*945aea22SJiri Olsa while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { 559*945aea22SJiri Olsa struct perf_sample sample; 560*945aea22SJiri Olsa 561*945aea22SJiri Olsa if (event->header.type != PERF_RECORD_SAMPLE) { 562*945aea22SJiri Olsa pr_debug("unexpected %s event\n", 563*945aea22SJiri Olsa perf_event__name(event->header.type)); 564*945aea22SJiri Olsa goto out_munmap; 565*945aea22SJiri Olsa } 566*945aea22SJiri Olsa 567*945aea22SJiri Olsa err = perf_evlist__parse_sample(evlist, event, &sample); 568*945aea22SJiri Olsa if (err) { 569*945aea22SJiri Olsa pr_err("Can't parse sample, err = %d\n", err); 570*945aea22SJiri Olsa goto out_munmap; 571*945aea22SJiri Olsa } 572*945aea22SJiri Olsa 573*945aea22SJiri Olsa evsel = perf_evlist__id2evsel(evlist, sample.id); 574*945aea22SJiri Olsa if (evsel == NULL) { 575*945aea22SJiri Olsa pr_debug("event with id %" PRIu64 576*945aea22SJiri Olsa " doesn't map to an evsel\n", sample.id); 577*945aea22SJiri Olsa goto out_munmap; 578*945aea22SJiri Olsa } 579*945aea22SJiri Olsa nr_events[evsel->idx]++; 580*945aea22SJiri Olsa } 581*945aea22SJiri Olsa 582*945aea22SJiri Olsa list_for_each_entry(evsel, &evlist->entries, node) { 583*945aea22SJiri Olsa if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { 584*945aea22SJiri Olsa pr_debug("expected %d %s events, got %d\n", 585*945aea22SJiri Olsa expected_nr_events[evsel->idx], 586*945aea22SJiri Olsa perf_evsel__name(evsel), nr_events[evsel->idx]); 587*945aea22SJiri Olsa goto out_munmap; 588*945aea22SJiri Olsa } 589*945aea22SJiri Olsa } 590*945aea22SJiri Olsa 591*945aea22SJiri Olsa err = 0; 592*945aea22SJiri Olsa out_munmap: 593*945aea22SJiri Olsa perf_evlist__munmap(evlist); 594*945aea22SJiri Olsa out_close_fd: 595*945aea22SJiri Olsa for (i = 0; i < nsyscalls; ++i) 596*945aea22SJiri Olsa perf_evsel__close_fd(evsels[i], 1, threads->nr); 597*945aea22SJiri Olsa out_free_evlist: 598*945aea22SJiri Olsa perf_evlist__delete(evlist); 599*945aea22SJiri Olsa out_free_cpus: 600*945aea22SJiri Olsa cpu_map__delete(cpus); 601*945aea22SJiri Olsa out_free_threads: 602*945aea22SJiri Olsa thread_map__delete(threads); 603*945aea22SJiri Olsa return err; 604*945aea22SJiri Olsa #undef nsyscalls 605*945aea22SJiri Olsa } 606*945aea22SJiri Olsa 607*945aea22SJiri Olsa static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, 608*945aea22SJiri Olsa size_t *sizep) 609*945aea22SJiri Olsa { 610*945aea22SJiri Olsa cpu_set_t *mask; 611*945aea22SJiri Olsa size_t size; 612*945aea22SJiri Olsa int i, cpu = -1, nrcpus = 1024; 613*945aea22SJiri Olsa realloc: 614*945aea22SJiri Olsa mask = CPU_ALLOC(nrcpus); 615*945aea22SJiri Olsa size = CPU_ALLOC_SIZE(nrcpus); 616*945aea22SJiri Olsa CPU_ZERO_S(size, mask); 617*945aea22SJiri Olsa 618*945aea22SJiri Olsa if (sched_getaffinity(pid, size, mask) == -1) { 619*945aea22SJiri Olsa CPU_FREE(mask); 620*945aea22SJiri Olsa if (errno == EINVAL && nrcpus < (1024 << 8)) { 621*945aea22SJiri Olsa nrcpus = nrcpus << 2; 622*945aea22SJiri Olsa goto realloc; 623*945aea22SJiri Olsa } 624*945aea22SJiri Olsa perror("sched_getaffinity"); 625*945aea22SJiri Olsa return -1; 626*945aea22SJiri Olsa } 627*945aea22SJiri Olsa 628*945aea22SJiri Olsa for (i = 0; i < nrcpus; i++) { 629*945aea22SJiri Olsa if (CPU_ISSET_S(i, size, mask)) { 630*945aea22SJiri Olsa if (cpu == -1) { 631*945aea22SJiri Olsa cpu = i; 632*945aea22SJiri Olsa *maskp = mask; 633*945aea22SJiri Olsa *sizep = size; 634*945aea22SJiri Olsa } else 635*945aea22SJiri Olsa CPU_CLR_S(i, size, mask); 636*945aea22SJiri Olsa } 637*945aea22SJiri Olsa } 638*945aea22SJiri Olsa 639*945aea22SJiri Olsa if (cpu == -1) 640*945aea22SJiri Olsa CPU_FREE(mask); 641*945aea22SJiri Olsa 642*945aea22SJiri Olsa return cpu; 643*945aea22SJiri Olsa } 644*945aea22SJiri Olsa 645*945aea22SJiri Olsa static int test__PERF_RECORD(void) 646*945aea22SJiri Olsa { 647*945aea22SJiri Olsa struct perf_record_opts opts = { 648*945aea22SJiri Olsa .target = { 649*945aea22SJiri Olsa .uid = UINT_MAX, 650*945aea22SJiri Olsa .uses_mmap = true, 651*945aea22SJiri Olsa }, 652*945aea22SJiri Olsa .no_delay = true, 653*945aea22SJiri Olsa .freq = 10, 654*945aea22SJiri Olsa .mmap_pages = 256, 655*945aea22SJiri Olsa }; 656*945aea22SJiri Olsa cpu_set_t *cpu_mask = NULL; 657*945aea22SJiri Olsa size_t cpu_mask_size = 0; 658*945aea22SJiri Olsa struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); 659*945aea22SJiri Olsa struct perf_evsel *evsel; 660*945aea22SJiri Olsa struct perf_sample sample; 661*945aea22SJiri Olsa const char *cmd = "sleep"; 662*945aea22SJiri Olsa const char *argv[] = { cmd, "1", NULL, }; 663*945aea22SJiri Olsa char *bname; 664*945aea22SJiri Olsa u64 prev_time = 0; 665*945aea22SJiri Olsa bool found_cmd_mmap = false, 666*945aea22SJiri Olsa found_libc_mmap = false, 667*945aea22SJiri Olsa found_vdso_mmap = false, 668*945aea22SJiri Olsa found_ld_mmap = false; 669*945aea22SJiri Olsa int err = -1, errs = 0, i, wakeups = 0; 670*945aea22SJiri Olsa u32 cpu; 671*945aea22SJiri Olsa int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; 672*945aea22SJiri Olsa 673*945aea22SJiri Olsa if (evlist == NULL || argv == NULL) { 674*945aea22SJiri Olsa pr_debug("Not enough memory to create evlist\n"); 675*945aea22SJiri Olsa goto out; 676*945aea22SJiri Olsa } 677*945aea22SJiri Olsa 678*945aea22SJiri Olsa /* 679*945aea22SJiri Olsa * We need at least one evsel in the evlist, use the default 680*945aea22SJiri Olsa * one: "cycles". 681*945aea22SJiri Olsa */ 682*945aea22SJiri Olsa err = perf_evlist__add_default(evlist); 683*945aea22SJiri Olsa if (err < 0) { 684*945aea22SJiri Olsa pr_debug("Not enough memory to create evsel\n"); 685*945aea22SJiri Olsa goto out_delete_evlist; 686*945aea22SJiri Olsa } 687*945aea22SJiri Olsa 688*945aea22SJiri Olsa /* 689*945aea22SJiri Olsa * Create maps of threads and cpus to monitor. In this case 690*945aea22SJiri Olsa * we start with all threads and cpus (-1, -1) but then in 691*945aea22SJiri Olsa * perf_evlist__prepare_workload we'll fill in the only thread 692*945aea22SJiri Olsa * we're monitoring, the one forked there. 693*945aea22SJiri Olsa */ 694*945aea22SJiri Olsa err = perf_evlist__create_maps(evlist, &opts.target); 695*945aea22SJiri Olsa if (err < 0) { 696*945aea22SJiri Olsa pr_debug("Not enough memory to create thread/cpu maps\n"); 697*945aea22SJiri Olsa goto out_delete_evlist; 698*945aea22SJiri Olsa } 699*945aea22SJiri Olsa 700*945aea22SJiri Olsa /* 701*945aea22SJiri Olsa * Prepare the workload in argv[] to run, it'll fork it, and then wait 702*945aea22SJiri Olsa * for perf_evlist__start_workload() to exec it. This is done this way 703*945aea22SJiri Olsa * so that we have time to open the evlist (calling sys_perf_event_open 704*945aea22SJiri Olsa * on all the fds) and then mmap them. 705*945aea22SJiri Olsa */ 706*945aea22SJiri Olsa err = perf_evlist__prepare_workload(evlist, &opts, argv); 707*945aea22SJiri Olsa if (err < 0) { 708*945aea22SJiri Olsa pr_debug("Couldn't run the workload!\n"); 709*945aea22SJiri Olsa goto out_delete_evlist; 710*945aea22SJiri Olsa } 711*945aea22SJiri Olsa 712*945aea22SJiri Olsa /* 713*945aea22SJiri Olsa * Config the evsels, setting attr->comm on the first one, etc. 714*945aea22SJiri Olsa */ 715*945aea22SJiri Olsa evsel = perf_evlist__first(evlist); 716*945aea22SJiri Olsa evsel->attr.sample_type |= PERF_SAMPLE_CPU; 717*945aea22SJiri Olsa evsel->attr.sample_type |= PERF_SAMPLE_TID; 718*945aea22SJiri Olsa evsel->attr.sample_type |= PERF_SAMPLE_TIME; 719*945aea22SJiri Olsa perf_evlist__config_attrs(evlist, &opts); 720*945aea22SJiri Olsa 721*945aea22SJiri Olsa err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, 722*945aea22SJiri Olsa &cpu_mask_size); 723*945aea22SJiri Olsa if (err < 0) { 724*945aea22SJiri Olsa pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); 725*945aea22SJiri Olsa goto out_delete_evlist; 726*945aea22SJiri Olsa } 727*945aea22SJiri Olsa 728*945aea22SJiri Olsa cpu = err; 729*945aea22SJiri Olsa 730*945aea22SJiri Olsa /* 731*945aea22SJiri Olsa * So that we can check perf_sample.cpu on all the samples. 732*945aea22SJiri Olsa */ 733*945aea22SJiri Olsa if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { 734*945aea22SJiri Olsa pr_debug("sched_setaffinity: %s\n", strerror(errno)); 735*945aea22SJiri Olsa goto out_free_cpu_mask; 736*945aea22SJiri Olsa } 737*945aea22SJiri Olsa 738*945aea22SJiri Olsa /* 739*945aea22SJiri Olsa * Call sys_perf_event_open on all the fds on all the evsels, 740*945aea22SJiri Olsa * grouping them if asked to. 741*945aea22SJiri Olsa */ 742*945aea22SJiri Olsa err = perf_evlist__open(evlist); 743*945aea22SJiri Olsa if (err < 0) { 744*945aea22SJiri Olsa pr_debug("perf_evlist__open: %s\n", strerror(errno)); 745*945aea22SJiri Olsa goto out_delete_evlist; 746*945aea22SJiri Olsa } 747*945aea22SJiri Olsa 748*945aea22SJiri Olsa /* 749*945aea22SJiri Olsa * mmap the first fd on a given CPU and ask for events for the other 750*945aea22SJiri Olsa * fds in the same CPU to be injected in the same mmap ring buffer 751*945aea22SJiri Olsa * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). 752*945aea22SJiri Olsa */ 753*945aea22SJiri Olsa err = perf_evlist__mmap(evlist, opts.mmap_pages, false); 754*945aea22SJiri Olsa if (err < 0) { 755*945aea22SJiri Olsa pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); 756*945aea22SJiri Olsa goto out_delete_evlist; 757*945aea22SJiri Olsa } 758*945aea22SJiri Olsa 759*945aea22SJiri Olsa /* 760*945aea22SJiri Olsa * Now that all is properly set up, enable the events, they will 761*945aea22SJiri Olsa * count just on workload.pid, which will start... 762*945aea22SJiri Olsa */ 763*945aea22SJiri Olsa perf_evlist__enable(evlist); 764*945aea22SJiri Olsa 765*945aea22SJiri Olsa /* 766*945aea22SJiri Olsa * Now! 767*945aea22SJiri Olsa */ 768*945aea22SJiri Olsa perf_evlist__start_workload(evlist); 769*945aea22SJiri Olsa 770*945aea22SJiri Olsa while (1) { 771*945aea22SJiri Olsa int before = total_events; 772*945aea22SJiri Olsa 773*945aea22SJiri Olsa for (i = 0; i < evlist->nr_mmaps; i++) { 774*945aea22SJiri Olsa union perf_event *event; 775*945aea22SJiri Olsa 776*945aea22SJiri Olsa while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { 777*945aea22SJiri Olsa const u32 type = event->header.type; 778*945aea22SJiri Olsa const char *name = perf_event__name(type); 779*945aea22SJiri Olsa 780*945aea22SJiri Olsa ++total_events; 781*945aea22SJiri Olsa if (type < PERF_RECORD_MAX) 782*945aea22SJiri Olsa nr_events[type]++; 783*945aea22SJiri Olsa 784*945aea22SJiri Olsa err = perf_evlist__parse_sample(evlist, event, &sample); 785*945aea22SJiri Olsa if (err < 0) { 786*945aea22SJiri Olsa if (verbose) 787*945aea22SJiri Olsa perf_event__fprintf(event, stderr); 788*945aea22SJiri Olsa pr_debug("Couldn't parse sample\n"); 789*945aea22SJiri Olsa goto out_err; 790*945aea22SJiri Olsa } 791*945aea22SJiri Olsa 792*945aea22SJiri Olsa if (verbose) { 793*945aea22SJiri Olsa pr_info("%" PRIu64" %d ", sample.time, sample.cpu); 794*945aea22SJiri Olsa perf_event__fprintf(event, stderr); 795*945aea22SJiri Olsa } 796*945aea22SJiri Olsa 797*945aea22SJiri Olsa if (prev_time > sample.time) { 798*945aea22SJiri Olsa pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", 799*945aea22SJiri Olsa name, prev_time, sample.time); 800*945aea22SJiri Olsa ++errs; 801*945aea22SJiri Olsa } 802*945aea22SJiri Olsa 803*945aea22SJiri Olsa prev_time = sample.time; 804*945aea22SJiri Olsa 805*945aea22SJiri Olsa if (sample.cpu != cpu) { 806*945aea22SJiri Olsa pr_debug("%s with unexpected cpu, expected %d, got %d\n", 807*945aea22SJiri Olsa name, cpu, sample.cpu); 808*945aea22SJiri Olsa ++errs; 809*945aea22SJiri Olsa } 810*945aea22SJiri Olsa 811*945aea22SJiri Olsa if ((pid_t)sample.pid != evlist->workload.pid) { 812*945aea22SJiri Olsa pr_debug("%s with unexpected pid, expected %d, got %d\n", 813*945aea22SJiri Olsa name, evlist->workload.pid, sample.pid); 814*945aea22SJiri Olsa ++errs; 815*945aea22SJiri Olsa } 816*945aea22SJiri Olsa 817*945aea22SJiri Olsa if ((pid_t)sample.tid != evlist->workload.pid) { 818*945aea22SJiri Olsa pr_debug("%s with unexpected tid, expected %d, got %d\n", 819*945aea22SJiri Olsa name, evlist->workload.pid, sample.tid); 820*945aea22SJiri Olsa ++errs; 821*945aea22SJiri Olsa } 822*945aea22SJiri Olsa 823*945aea22SJiri Olsa if ((type == PERF_RECORD_COMM || 824*945aea22SJiri Olsa type == PERF_RECORD_MMAP || 825*945aea22SJiri Olsa type == PERF_RECORD_FORK || 826*945aea22SJiri Olsa type == PERF_RECORD_EXIT) && 827*945aea22SJiri Olsa (pid_t)event->comm.pid != evlist->workload.pid) { 828*945aea22SJiri Olsa pr_debug("%s with unexpected pid/tid\n", name); 829*945aea22SJiri Olsa ++errs; 830*945aea22SJiri Olsa } 831*945aea22SJiri Olsa 832*945aea22SJiri Olsa if ((type == PERF_RECORD_COMM || 833*945aea22SJiri Olsa type == PERF_RECORD_MMAP) && 834*945aea22SJiri Olsa event->comm.pid != event->comm.tid) { 835*945aea22SJiri Olsa pr_debug("%s with different pid/tid!\n", name); 836*945aea22SJiri Olsa ++errs; 837*945aea22SJiri Olsa } 838*945aea22SJiri Olsa 839*945aea22SJiri Olsa switch (type) { 840*945aea22SJiri Olsa case PERF_RECORD_COMM: 841*945aea22SJiri Olsa if (strcmp(event->comm.comm, cmd)) { 842*945aea22SJiri Olsa pr_debug("%s with unexpected comm!\n", name); 843*945aea22SJiri Olsa ++errs; 844*945aea22SJiri Olsa } 845*945aea22SJiri Olsa break; 846*945aea22SJiri Olsa case PERF_RECORD_EXIT: 847*945aea22SJiri Olsa goto found_exit; 848*945aea22SJiri Olsa case PERF_RECORD_MMAP: 849*945aea22SJiri Olsa bname = strrchr(event->mmap.filename, '/'); 850*945aea22SJiri Olsa if (bname != NULL) { 851*945aea22SJiri Olsa if (!found_cmd_mmap) 852*945aea22SJiri Olsa found_cmd_mmap = !strcmp(bname + 1, cmd); 853*945aea22SJiri Olsa if (!found_libc_mmap) 854*945aea22SJiri Olsa found_libc_mmap = !strncmp(bname + 1, "libc", 4); 855*945aea22SJiri Olsa if (!found_ld_mmap) 856*945aea22SJiri Olsa found_ld_mmap = !strncmp(bname + 1, "ld", 2); 857*945aea22SJiri Olsa } else if (!found_vdso_mmap) 858*945aea22SJiri Olsa found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); 859*945aea22SJiri Olsa break; 860*945aea22SJiri Olsa 861*945aea22SJiri Olsa case PERF_RECORD_SAMPLE: 862*945aea22SJiri Olsa /* Just ignore samples for now */ 863*945aea22SJiri Olsa break; 864*945aea22SJiri Olsa default: 865*945aea22SJiri Olsa pr_debug("Unexpected perf_event->header.type %d!\n", 866*945aea22SJiri Olsa type); 867*945aea22SJiri Olsa ++errs; 868*945aea22SJiri Olsa } 869*945aea22SJiri Olsa } 870*945aea22SJiri Olsa } 871*945aea22SJiri Olsa 872*945aea22SJiri Olsa /* 873*945aea22SJiri Olsa * We don't use poll here because at least at 3.1 times the 874*945aea22SJiri Olsa * PERF_RECORD_{!SAMPLE} events don't honour 875*945aea22SJiri Olsa * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. 876*945aea22SJiri Olsa */ 877*945aea22SJiri Olsa if (total_events == before && false) 878*945aea22SJiri Olsa poll(evlist->pollfd, evlist->nr_fds, -1); 879*945aea22SJiri Olsa 880*945aea22SJiri Olsa sleep(1); 881*945aea22SJiri Olsa if (++wakeups > 5) { 882*945aea22SJiri Olsa pr_debug("No PERF_RECORD_EXIT event!\n"); 883*945aea22SJiri Olsa break; 884*945aea22SJiri Olsa } 885*945aea22SJiri Olsa } 886*945aea22SJiri Olsa 887*945aea22SJiri Olsa found_exit: 888*945aea22SJiri Olsa if (nr_events[PERF_RECORD_COMM] > 1) { 889*945aea22SJiri Olsa pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); 890*945aea22SJiri Olsa ++errs; 891*945aea22SJiri Olsa } 892*945aea22SJiri Olsa 893*945aea22SJiri Olsa if (nr_events[PERF_RECORD_COMM] == 0) { 894*945aea22SJiri Olsa pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); 895*945aea22SJiri Olsa ++errs; 896*945aea22SJiri Olsa } 897*945aea22SJiri Olsa 898*945aea22SJiri Olsa if (!found_cmd_mmap) { 899*945aea22SJiri Olsa pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); 900*945aea22SJiri Olsa ++errs; 901*945aea22SJiri Olsa } 902*945aea22SJiri Olsa 903*945aea22SJiri Olsa if (!found_libc_mmap) { 904*945aea22SJiri Olsa pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); 905*945aea22SJiri Olsa ++errs; 906*945aea22SJiri Olsa } 907*945aea22SJiri Olsa 908*945aea22SJiri Olsa if (!found_ld_mmap) { 909*945aea22SJiri Olsa pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); 910*945aea22SJiri Olsa ++errs; 911*945aea22SJiri Olsa } 912*945aea22SJiri Olsa 913*945aea22SJiri Olsa if (!found_vdso_mmap) { 914*945aea22SJiri Olsa pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); 915*945aea22SJiri Olsa ++errs; 916*945aea22SJiri Olsa } 917*945aea22SJiri Olsa out_err: 918*945aea22SJiri Olsa perf_evlist__munmap(evlist); 919*945aea22SJiri Olsa out_free_cpu_mask: 920*945aea22SJiri Olsa CPU_FREE(cpu_mask); 921*945aea22SJiri Olsa out_delete_evlist: 922*945aea22SJiri Olsa perf_evlist__delete(evlist); 923*945aea22SJiri Olsa out: 924*945aea22SJiri Olsa return (err < 0 || errs > 0) ? -1 : 0; 925*945aea22SJiri Olsa } 926*945aea22SJiri Olsa 927*945aea22SJiri Olsa 928*945aea22SJiri Olsa #if defined(__x86_64__) || defined(__i386__) 929*945aea22SJiri Olsa 930*945aea22SJiri Olsa #define barrier() asm volatile("" ::: "memory") 931*945aea22SJiri Olsa 932*945aea22SJiri Olsa static u64 rdpmc(unsigned int counter) 933*945aea22SJiri Olsa { 934*945aea22SJiri Olsa unsigned int low, high; 935*945aea22SJiri Olsa 936*945aea22SJiri Olsa asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); 937*945aea22SJiri Olsa 938*945aea22SJiri Olsa return low | ((u64)high) << 32; 939*945aea22SJiri Olsa } 940*945aea22SJiri Olsa 941*945aea22SJiri Olsa static u64 rdtsc(void) 942*945aea22SJiri Olsa { 943*945aea22SJiri Olsa unsigned int low, high; 944*945aea22SJiri Olsa 945*945aea22SJiri Olsa asm volatile("rdtsc" : "=a" (low), "=d" (high)); 946*945aea22SJiri Olsa 947*945aea22SJiri Olsa return low | ((u64)high) << 32; 948*945aea22SJiri Olsa } 949*945aea22SJiri Olsa 950*945aea22SJiri Olsa static u64 mmap_read_self(void *addr) 951*945aea22SJiri Olsa { 952*945aea22SJiri Olsa struct perf_event_mmap_page *pc = addr; 953*945aea22SJiri Olsa u32 seq, idx, time_mult = 0, time_shift = 0; 954*945aea22SJiri Olsa u64 count, cyc = 0, time_offset = 0, enabled, running, delta; 955*945aea22SJiri Olsa 956*945aea22SJiri Olsa do { 957*945aea22SJiri Olsa seq = pc->lock; 958*945aea22SJiri Olsa barrier(); 959*945aea22SJiri Olsa 960*945aea22SJiri Olsa enabled = pc->time_enabled; 961*945aea22SJiri Olsa running = pc->time_running; 962*945aea22SJiri Olsa 963*945aea22SJiri Olsa if (enabled != running) { 964*945aea22SJiri Olsa cyc = rdtsc(); 965*945aea22SJiri Olsa time_mult = pc->time_mult; 966*945aea22SJiri Olsa time_shift = pc->time_shift; 967*945aea22SJiri Olsa time_offset = pc->time_offset; 968*945aea22SJiri Olsa } 969*945aea22SJiri Olsa 970*945aea22SJiri Olsa idx = pc->index; 971*945aea22SJiri Olsa count = pc->offset; 972*945aea22SJiri Olsa if (idx) 973*945aea22SJiri Olsa count += rdpmc(idx - 1); 974*945aea22SJiri Olsa 975*945aea22SJiri Olsa barrier(); 976*945aea22SJiri Olsa } while (pc->lock != seq); 977*945aea22SJiri Olsa 978*945aea22SJiri Olsa if (enabled != running) { 979*945aea22SJiri Olsa u64 quot, rem; 980*945aea22SJiri Olsa 981*945aea22SJiri Olsa quot = (cyc >> time_shift); 982*945aea22SJiri Olsa rem = cyc & ((1 << time_shift) - 1); 983*945aea22SJiri Olsa delta = time_offset + quot * time_mult + 984*945aea22SJiri Olsa ((rem * time_mult) >> time_shift); 985*945aea22SJiri Olsa 986*945aea22SJiri Olsa enabled += delta; 987*945aea22SJiri Olsa if (idx) 988*945aea22SJiri Olsa running += delta; 989*945aea22SJiri Olsa 990*945aea22SJiri Olsa quot = count / running; 991*945aea22SJiri Olsa rem = count % running; 992*945aea22SJiri Olsa count = quot * enabled + (rem * enabled) / running; 993*945aea22SJiri Olsa } 994*945aea22SJiri Olsa 995*945aea22SJiri Olsa return count; 996*945aea22SJiri Olsa } 997*945aea22SJiri Olsa 998*945aea22SJiri Olsa /* 999*945aea22SJiri Olsa * If the RDPMC instruction faults then signal this back to the test parent task: 1000*945aea22SJiri Olsa */ 1001*945aea22SJiri Olsa static void segfault_handler(int sig __maybe_unused, 1002*945aea22SJiri Olsa siginfo_t *info __maybe_unused, 1003*945aea22SJiri Olsa void *uc __maybe_unused) 1004*945aea22SJiri Olsa { 1005*945aea22SJiri Olsa exit(-1); 1006*945aea22SJiri Olsa } 1007*945aea22SJiri Olsa 1008*945aea22SJiri Olsa static int __test__rdpmc(void) 1009*945aea22SJiri Olsa { 1010*945aea22SJiri Olsa volatile int tmp = 0; 1011*945aea22SJiri Olsa u64 i, loops = 1000; 1012*945aea22SJiri Olsa int n; 1013*945aea22SJiri Olsa int fd; 1014*945aea22SJiri Olsa void *addr; 1015*945aea22SJiri Olsa struct perf_event_attr attr = { 1016*945aea22SJiri Olsa .type = PERF_TYPE_HARDWARE, 1017*945aea22SJiri Olsa .config = PERF_COUNT_HW_INSTRUCTIONS, 1018*945aea22SJiri Olsa .exclude_kernel = 1, 1019*945aea22SJiri Olsa }; 1020*945aea22SJiri Olsa u64 delta_sum = 0; 1021*945aea22SJiri Olsa struct sigaction sa; 1022*945aea22SJiri Olsa 1023*945aea22SJiri Olsa sigfillset(&sa.sa_mask); 1024*945aea22SJiri Olsa sa.sa_sigaction = segfault_handler; 1025*945aea22SJiri Olsa sigaction(SIGSEGV, &sa, NULL); 1026*945aea22SJiri Olsa 1027*945aea22SJiri Olsa fd = sys_perf_event_open(&attr, 0, -1, -1, 0); 1028*945aea22SJiri Olsa if (fd < 0) { 1029*945aea22SJiri Olsa pr_err("Error: sys_perf_event_open() syscall returned " 1030*945aea22SJiri Olsa "with %d (%s)\n", fd, strerror(errno)); 1031*945aea22SJiri Olsa return -1; 1032*945aea22SJiri Olsa } 1033*945aea22SJiri Olsa 1034*945aea22SJiri Olsa addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); 1035*945aea22SJiri Olsa if (addr == (void *)(-1)) { 1036*945aea22SJiri Olsa pr_err("Error: mmap() syscall returned with (%s)\n", 1037*945aea22SJiri Olsa strerror(errno)); 1038*945aea22SJiri Olsa goto out_close; 1039*945aea22SJiri Olsa } 1040*945aea22SJiri Olsa 1041*945aea22SJiri Olsa for (n = 0; n < 6; n++) { 1042*945aea22SJiri Olsa u64 stamp, now, delta; 1043*945aea22SJiri Olsa 1044*945aea22SJiri Olsa stamp = mmap_read_self(addr); 1045*945aea22SJiri Olsa 1046*945aea22SJiri Olsa for (i = 0; i < loops; i++) 1047*945aea22SJiri Olsa tmp++; 1048*945aea22SJiri Olsa 1049*945aea22SJiri Olsa now = mmap_read_self(addr); 1050*945aea22SJiri Olsa loops *= 10; 1051*945aea22SJiri Olsa 1052*945aea22SJiri Olsa delta = now - stamp; 1053*945aea22SJiri Olsa pr_debug("%14d: %14Lu\n", n, (long long)delta); 1054*945aea22SJiri Olsa 1055*945aea22SJiri Olsa delta_sum += delta; 1056*945aea22SJiri Olsa } 1057*945aea22SJiri Olsa 1058*945aea22SJiri Olsa munmap(addr, page_size); 1059*945aea22SJiri Olsa pr_debug(" "); 1060*945aea22SJiri Olsa out_close: 1061*945aea22SJiri Olsa close(fd); 1062*945aea22SJiri Olsa 1063*945aea22SJiri Olsa if (!delta_sum) 1064*945aea22SJiri Olsa return -1; 1065*945aea22SJiri Olsa 1066*945aea22SJiri Olsa return 0; 1067*945aea22SJiri Olsa } 1068*945aea22SJiri Olsa 1069*945aea22SJiri Olsa static int test__rdpmc(void) 1070*945aea22SJiri Olsa { 1071*945aea22SJiri Olsa int status = 0; 1072*945aea22SJiri Olsa int wret = 0; 1073*945aea22SJiri Olsa int ret; 1074*945aea22SJiri Olsa int pid; 1075*945aea22SJiri Olsa 1076*945aea22SJiri Olsa pid = fork(); 1077*945aea22SJiri Olsa if (pid < 0) 1078*945aea22SJiri Olsa return -1; 1079*945aea22SJiri Olsa 1080*945aea22SJiri Olsa if (!pid) { 1081*945aea22SJiri Olsa ret = __test__rdpmc(); 1082*945aea22SJiri Olsa 1083*945aea22SJiri Olsa exit(ret); 1084*945aea22SJiri Olsa } 1085*945aea22SJiri Olsa 1086*945aea22SJiri Olsa wret = waitpid(pid, &status, 0); 1087*945aea22SJiri Olsa if (wret < 0 || status) 1088*945aea22SJiri Olsa return -1; 1089*945aea22SJiri Olsa 1090*945aea22SJiri Olsa return 0; 1091*945aea22SJiri Olsa } 1092*945aea22SJiri Olsa 1093*945aea22SJiri Olsa #endif 1094*945aea22SJiri Olsa 1095*945aea22SJiri Olsa static int test__perf_pmu(void) 1096*945aea22SJiri Olsa { 1097*945aea22SJiri Olsa return perf_pmu__test(); 1098*945aea22SJiri Olsa } 1099*945aea22SJiri Olsa 1100*945aea22SJiri Olsa static int perf_evsel__roundtrip_cache_name_test(void) 1101*945aea22SJiri Olsa { 1102*945aea22SJiri Olsa char name[128]; 1103*945aea22SJiri Olsa int type, op, err = 0, ret = 0, i, idx; 1104*945aea22SJiri Olsa struct perf_evsel *evsel; 1105*945aea22SJiri Olsa struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); 1106*945aea22SJiri Olsa 1107*945aea22SJiri Olsa if (evlist == NULL) 1108*945aea22SJiri Olsa return -ENOMEM; 1109*945aea22SJiri Olsa 1110*945aea22SJiri Olsa for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { 1111*945aea22SJiri Olsa for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { 1112*945aea22SJiri Olsa /* skip invalid cache type */ 1113*945aea22SJiri Olsa if (!perf_evsel__is_cache_op_valid(type, op)) 1114*945aea22SJiri Olsa continue; 1115*945aea22SJiri Olsa 1116*945aea22SJiri Olsa for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 1117*945aea22SJiri Olsa __perf_evsel__hw_cache_type_op_res_name(type, op, i, 1118*945aea22SJiri Olsa name, sizeof(name)); 1119*945aea22SJiri Olsa err = parse_events(evlist, name, 0); 1120*945aea22SJiri Olsa if (err) 1121*945aea22SJiri Olsa ret = err; 1122*945aea22SJiri Olsa } 1123*945aea22SJiri Olsa } 1124*945aea22SJiri Olsa } 1125*945aea22SJiri Olsa 1126*945aea22SJiri Olsa idx = 0; 1127*945aea22SJiri Olsa evsel = perf_evlist__first(evlist); 1128*945aea22SJiri Olsa 1129*945aea22SJiri Olsa for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { 1130*945aea22SJiri Olsa for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { 1131*945aea22SJiri Olsa /* skip invalid cache type */ 1132*945aea22SJiri Olsa if (!perf_evsel__is_cache_op_valid(type, op)) 1133*945aea22SJiri Olsa continue; 1134*945aea22SJiri Olsa 1135*945aea22SJiri Olsa for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 1136*945aea22SJiri Olsa __perf_evsel__hw_cache_type_op_res_name(type, op, i, 1137*945aea22SJiri Olsa name, sizeof(name)); 1138*945aea22SJiri Olsa if (evsel->idx != idx) 1139*945aea22SJiri Olsa continue; 1140*945aea22SJiri Olsa 1141*945aea22SJiri Olsa ++idx; 1142*945aea22SJiri Olsa 1143*945aea22SJiri Olsa if (strcmp(perf_evsel__name(evsel), name)) { 1144*945aea22SJiri Olsa pr_debug("%s != %s\n", perf_evsel__name(evsel), name); 1145*945aea22SJiri Olsa ret = -1; 1146*945aea22SJiri Olsa } 1147*945aea22SJiri Olsa 1148*945aea22SJiri Olsa evsel = perf_evsel__next(evsel); 1149*945aea22SJiri Olsa } 1150*945aea22SJiri Olsa } 1151*945aea22SJiri Olsa } 1152*945aea22SJiri Olsa 1153*945aea22SJiri Olsa perf_evlist__delete(evlist); 1154*945aea22SJiri Olsa return ret; 1155*945aea22SJiri Olsa } 1156*945aea22SJiri Olsa 1157*945aea22SJiri Olsa static int __perf_evsel__name_array_test(const char *names[], int nr_names) 1158*945aea22SJiri Olsa { 1159*945aea22SJiri Olsa int i, err; 1160*945aea22SJiri Olsa struct perf_evsel *evsel; 1161*945aea22SJiri Olsa struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); 1162*945aea22SJiri Olsa 1163*945aea22SJiri Olsa if (evlist == NULL) 1164*945aea22SJiri Olsa return -ENOMEM; 1165*945aea22SJiri Olsa 1166*945aea22SJiri Olsa for (i = 0; i < nr_names; ++i) { 1167*945aea22SJiri Olsa err = parse_events(evlist, names[i], 0); 1168*945aea22SJiri Olsa if (err) { 1169*945aea22SJiri Olsa pr_debug("failed to parse event '%s', err %d\n", 1170*945aea22SJiri Olsa names[i], err); 1171*945aea22SJiri Olsa goto out_delete_evlist; 1172*945aea22SJiri Olsa } 1173*945aea22SJiri Olsa } 1174*945aea22SJiri Olsa 1175*945aea22SJiri Olsa err = 0; 1176*945aea22SJiri Olsa list_for_each_entry(evsel, &evlist->entries, node) { 1177*945aea22SJiri Olsa if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { 1178*945aea22SJiri Olsa --err; 1179*945aea22SJiri Olsa pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); 1180*945aea22SJiri Olsa } 1181*945aea22SJiri Olsa } 1182*945aea22SJiri Olsa 1183*945aea22SJiri Olsa out_delete_evlist: 1184*945aea22SJiri Olsa perf_evlist__delete(evlist); 1185*945aea22SJiri Olsa return err; 1186*945aea22SJiri Olsa } 1187*945aea22SJiri Olsa 1188*945aea22SJiri Olsa #define perf_evsel__name_array_test(names) \ 1189*945aea22SJiri Olsa __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) 1190*945aea22SJiri Olsa 1191*945aea22SJiri Olsa static int perf_evsel__roundtrip_name_test(void) 1192*945aea22SJiri Olsa { 1193*945aea22SJiri Olsa int err = 0, ret = 0; 1194*945aea22SJiri Olsa 1195*945aea22SJiri Olsa err = perf_evsel__name_array_test(perf_evsel__hw_names); 1196*945aea22SJiri Olsa if (err) 1197*945aea22SJiri Olsa ret = err; 1198*945aea22SJiri Olsa 1199*945aea22SJiri Olsa err = perf_evsel__name_array_test(perf_evsel__sw_names); 1200*945aea22SJiri Olsa if (err) 1201*945aea22SJiri Olsa ret = err; 1202*945aea22SJiri Olsa 1203*945aea22SJiri Olsa err = perf_evsel__roundtrip_cache_name_test(); 1204*945aea22SJiri Olsa if (err) 1205*945aea22SJiri Olsa ret = err; 1206*945aea22SJiri Olsa 1207*945aea22SJiri Olsa return ret; 1208*945aea22SJiri Olsa } 1209*945aea22SJiri Olsa 1210*945aea22SJiri Olsa static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, 1211*945aea22SJiri Olsa int size, bool should_be_signed) 1212*945aea22SJiri Olsa { 1213*945aea22SJiri Olsa struct format_field *field = perf_evsel__field(evsel, name); 1214*945aea22SJiri Olsa int is_signed; 1215*945aea22SJiri Olsa int ret = 0; 1216*945aea22SJiri Olsa 1217*945aea22SJiri Olsa if (field == NULL) { 1218*945aea22SJiri Olsa pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); 1219*945aea22SJiri Olsa return -1; 1220*945aea22SJiri Olsa } 1221*945aea22SJiri Olsa 1222*945aea22SJiri Olsa is_signed = !!(field->flags | FIELD_IS_SIGNED); 1223*945aea22SJiri Olsa if (should_be_signed && !is_signed) { 1224*945aea22SJiri Olsa pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", 1225*945aea22SJiri Olsa evsel->name, name, is_signed, should_be_signed); 1226*945aea22SJiri Olsa ret = -1; 1227*945aea22SJiri Olsa } 1228*945aea22SJiri Olsa 1229*945aea22SJiri Olsa if (field->size != size) { 1230*945aea22SJiri Olsa pr_debug("%s: \"%s\" size (%d) should be %d!\n", 1231*945aea22SJiri Olsa evsel->name, name, field->size, size); 1232*945aea22SJiri Olsa ret = -1; 1233*945aea22SJiri Olsa } 1234*945aea22SJiri Olsa 1235*945aea22SJiri Olsa return ret; 1236*945aea22SJiri Olsa } 1237*945aea22SJiri Olsa 1238*945aea22SJiri Olsa static int perf_evsel__tp_sched_test(void) 1239*945aea22SJiri Olsa { 1240*945aea22SJiri Olsa struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); 1241*945aea22SJiri Olsa int ret = 0; 1242*945aea22SJiri Olsa 1243*945aea22SJiri Olsa if (evsel == NULL) { 1244*945aea22SJiri Olsa pr_debug("perf_evsel__new\n"); 1245*945aea22SJiri Olsa return -1; 1246*945aea22SJiri Olsa } 1247*945aea22SJiri Olsa 1248*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) 1249*945aea22SJiri Olsa ret = -1; 1250*945aea22SJiri Olsa 1251*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) 1252*945aea22SJiri Olsa ret = -1; 1253*945aea22SJiri Olsa 1254*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) 1255*945aea22SJiri Olsa ret = -1; 1256*945aea22SJiri Olsa 1257*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "prev_state", 8, true)) 1258*945aea22SJiri Olsa ret = -1; 1259*945aea22SJiri Olsa 1260*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "next_comm", 16, true)) 1261*945aea22SJiri Olsa ret = -1; 1262*945aea22SJiri Olsa 1263*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "next_pid", 4, true)) 1264*945aea22SJiri Olsa ret = -1; 1265*945aea22SJiri Olsa 1266*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "next_prio", 4, true)) 1267*945aea22SJiri Olsa ret = -1; 1268*945aea22SJiri Olsa 1269*945aea22SJiri Olsa perf_evsel__delete(evsel); 1270*945aea22SJiri Olsa 1271*945aea22SJiri Olsa evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); 1272*945aea22SJiri Olsa 1273*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "comm", 16, true)) 1274*945aea22SJiri Olsa ret = -1; 1275*945aea22SJiri Olsa 1276*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "pid", 4, true)) 1277*945aea22SJiri Olsa ret = -1; 1278*945aea22SJiri Olsa 1279*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "prio", 4, true)) 1280*945aea22SJiri Olsa ret = -1; 1281*945aea22SJiri Olsa 1282*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "success", 4, true)) 1283*945aea22SJiri Olsa ret = -1; 1284*945aea22SJiri Olsa 1285*945aea22SJiri Olsa if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) 1286*945aea22SJiri Olsa ret = -1; 1287*945aea22SJiri Olsa 1288*945aea22SJiri Olsa return ret; 1289*945aea22SJiri Olsa } 1290*945aea22SJiri Olsa 1291*945aea22SJiri Olsa static int test__syscall_open_tp_fields(void) 1292*945aea22SJiri Olsa { 1293*945aea22SJiri Olsa struct perf_record_opts opts = { 1294*945aea22SJiri Olsa .target = { 1295*945aea22SJiri Olsa .uid = UINT_MAX, 1296*945aea22SJiri Olsa .uses_mmap = true, 1297*945aea22SJiri Olsa }, 1298*945aea22SJiri Olsa .no_delay = true, 1299*945aea22SJiri Olsa .freq = 1, 1300*945aea22SJiri Olsa .mmap_pages = 256, 1301*945aea22SJiri Olsa .raw_samples = true, 1302*945aea22SJiri Olsa }; 1303*945aea22SJiri Olsa const char *filename = "/etc/passwd"; 1304*945aea22SJiri Olsa int flags = O_RDONLY | O_DIRECTORY; 1305*945aea22SJiri Olsa struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); 1306*945aea22SJiri Olsa struct perf_evsel *evsel; 1307*945aea22SJiri Olsa int err = -1, i, nr_events = 0, nr_polls = 0; 1308*945aea22SJiri Olsa 1309*945aea22SJiri Olsa if (evlist == NULL) { 1310*945aea22SJiri Olsa pr_debug("%s: perf_evlist__new\n", __func__); 1311*945aea22SJiri Olsa goto out; 1312*945aea22SJiri Olsa } 1313*945aea22SJiri Olsa 1314*945aea22SJiri Olsa evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); 1315*945aea22SJiri Olsa if (evsel == NULL) { 1316*945aea22SJiri Olsa pr_debug("%s: perf_evsel__newtp\n", __func__); 1317*945aea22SJiri Olsa goto out_delete_evlist; 1318*945aea22SJiri Olsa } 1319*945aea22SJiri Olsa 1320*945aea22SJiri Olsa perf_evlist__add(evlist, evsel); 1321*945aea22SJiri Olsa 1322*945aea22SJiri Olsa err = perf_evlist__create_maps(evlist, &opts.target); 1323*945aea22SJiri Olsa if (err < 0) { 1324*945aea22SJiri Olsa pr_debug("%s: perf_evlist__create_maps\n", __func__); 1325*945aea22SJiri Olsa goto out_delete_evlist; 1326*945aea22SJiri Olsa } 1327*945aea22SJiri Olsa 1328*945aea22SJiri Olsa perf_evsel__config(evsel, &opts, evsel); 1329*945aea22SJiri Olsa 1330*945aea22SJiri Olsa evlist->threads->map[0] = getpid(); 1331*945aea22SJiri Olsa 1332*945aea22SJiri Olsa err = perf_evlist__open(evlist); 1333*945aea22SJiri Olsa if (err < 0) { 1334*945aea22SJiri Olsa pr_debug("perf_evlist__open: %s\n", strerror(errno)); 1335*945aea22SJiri Olsa goto out_delete_evlist; 1336*945aea22SJiri Olsa } 1337*945aea22SJiri Olsa 1338*945aea22SJiri Olsa err = perf_evlist__mmap(evlist, UINT_MAX, false); 1339*945aea22SJiri Olsa if (err < 0) { 1340*945aea22SJiri Olsa pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); 1341*945aea22SJiri Olsa goto out_delete_evlist; 1342*945aea22SJiri Olsa } 1343*945aea22SJiri Olsa 1344*945aea22SJiri Olsa perf_evlist__enable(evlist); 1345*945aea22SJiri Olsa 1346*945aea22SJiri Olsa /* 1347*945aea22SJiri Olsa * Generate the event: 1348*945aea22SJiri Olsa */ 1349*945aea22SJiri Olsa open(filename, flags); 1350*945aea22SJiri Olsa 1351*945aea22SJiri Olsa while (1) { 1352*945aea22SJiri Olsa int before = nr_events; 1353*945aea22SJiri Olsa 1354*945aea22SJiri Olsa for (i = 0; i < evlist->nr_mmaps; i++) { 1355*945aea22SJiri Olsa union perf_event *event; 1356*945aea22SJiri Olsa 1357*945aea22SJiri Olsa while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { 1358*945aea22SJiri Olsa const u32 type = event->header.type; 1359*945aea22SJiri Olsa int tp_flags; 1360*945aea22SJiri Olsa struct perf_sample sample; 1361*945aea22SJiri Olsa 1362*945aea22SJiri Olsa ++nr_events; 1363*945aea22SJiri Olsa 1364*945aea22SJiri Olsa if (type != PERF_RECORD_SAMPLE) 1365*945aea22SJiri Olsa continue; 1366*945aea22SJiri Olsa 1367*945aea22SJiri Olsa err = perf_evsel__parse_sample(evsel, event, &sample); 1368*945aea22SJiri Olsa if (err) { 1369*945aea22SJiri Olsa pr_err("Can't parse sample, err = %d\n", err); 1370*945aea22SJiri Olsa goto out_munmap; 1371*945aea22SJiri Olsa } 1372*945aea22SJiri Olsa 1373*945aea22SJiri Olsa tp_flags = perf_evsel__intval(evsel, &sample, "flags"); 1374*945aea22SJiri Olsa 1375*945aea22SJiri Olsa if (flags != tp_flags) { 1376*945aea22SJiri Olsa pr_debug("%s: Expected flags=%#x, got %#x\n", 1377*945aea22SJiri Olsa __func__, flags, tp_flags); 1378*945aea22SJiri Olsa goto out_munmap; 1379*945aea22SJiri Olsa } 1380*945aea22SJiri Olsa 1381*945aea22SJiri Olsa goto out_ok; 1382*945aea22SJiri Olsa } 1383*945aea22SJiri Olsa } 1384*945aea22SJiri Olsa 1385*945aea22SJiri Olsa if (nr_events == before) 1386*945aea22SJiri Olsa poll(evlist->pollfd, evlist->nr_fds, 10); 1387*945aea22SJiri Olsa 1388*945aea22SJiri Olsa if (++nr_polls > 5) { 1389*945aea22SJiri Olsa pr_debug("%s: no events!\n", __func__); 1390*945aea22SJiri Olsa goto out_munmap; 1391*945aea22SJiri Olsa } 1392*945aea22SJiri Olsa } 1393*945aea22SJiri Olsa out_ok: 1394*945aea22SJiri Olsa err = 0; 1395*945aea22SJiri Olsa out_munmap: 1396*945aea22SJiri Olsa perf_evlist__munmap(evlist); 1397*945aea22SJiri Olsa out_delete_evlist: 1398*945aea22SJiri Olsa perf_evlist__delete(evlist); 1399*945aea22SJiri Olsa out: 1400*945aea22SJiri Olsa return err; 1401*945aea22SJiri Olsa } 1402*945aea22SJiri Olsa 1403*945aea22SJiri Olsa static struct test { 1404*945aea22SJiri Olsa const char *desc; 1405*945aea22SJiri Olsa int (*func)(void); 1406*945aea22SJiri Olsa } tests[] = { 1407*945aea22SJiri Olsa { 1408*945aea22SJiri Olsa .desc = "vmlinux symtab matches kallsyms", 1409*945aea22SJiri Olsa .func = test__vmlinux_matches_kallsyms, 1410*945aea22SJiri Olsa }, 1411*945aea22SJiri Olsa { 1412*945aea22SJiri Olsa .desc = "detect open syscall event", 1413*945aea22SJiri Olsa .func = test__open_syscall_event, 1414*945aea22SJiri Olsa }, 1415*945aea22SJiri Olsa { 1416*945aea22SJiri Olsa .desc = "detect open syscall event on all cpus", 1417*945aea22SJiri Olsa .func = test__open_syscall_event_on_all_cpus, 1418*945aea22SJiri Olsa }, 1419*945aea22SJiri Olsa { 1420*945aea22SJiri Olsa .desc = "read samples using the mmap interface", 1421*945aea22SJiri Olsa .func = test__basic_mmap, 1422*945aea22SJiri Olsa }, 1423*945aea22SJiri Olsa { 1424*945aea22SJiri Olsa .desc = "parse events tests", 1425*945aea22SJiri Olsa .func = parse_events__test, 1426*945aea22SJiri Olsa }, 1427*945aea22SJiri Olsa #if defined(__x86_64__) || defined(__i386__) 1428*945aea22SJiri Olsa { 1429*945aea22SJiri Olsa .desc = "x86 rdpmc test", 1430*945aea22SJiri Olsa .func = test__rdpmc, 1431*945aea22SJiri Olsa }, 1432*945aea22SJiri Olsa #endif 1433*945aea22SJiri Olsa { 1434*945aea22SJiri Olsa .desc = "Validate PERF_RECORD_* events & perf_sample fields", 1435*945aea22SJiri Olsa .func = test__PERF_RECORD, 1436*945aea22SJiri Olsa }, 1437*945aea22SJiri Olsa { 1438*945aea22SJiri Olsa .desc = "Test perf pmu format parsing", 1439*945aea22SJiri Olsa .func = test__perf_pmu, 1440*945aea22SJiri Olsa }, 1441*945aea22SJiri Olsa { 1442*945aea22SJiri Olsa .desc = "Test dso data interface", 1443*945aea22SJiri Olsa .func = dso__test_data, 1444*945aea22SJiri Olsa }, 1445*945aea22SJiri Olsa { 1446*945aea22SJiri Olsa .desc = "roundtrip evsel->name check", 1447*945aea22SJiri Olsa .func = perf_evsel__roundtrip_name_test, 1448*945aea22SJiri Olsa }, 1449*945aea22SJiri Olsa { 1450*945aea22SJiri Olsa .desc = "Check parsing of sched tracepoints fields", 1451*945aea22SJiri Olsa .func = perf_evsel__tp_sched_test, 1452*945aea22SJiri Olsa }, 1453*945aea22SJiri Olsa { 1454*945aea22SJiri Olsa .desc = "Generate and check syscalls:sys_enter_open event fields", 1455*945aea22SJiri Olsa .func = test__syscall_open_tp_fields, 1456*945aea22SJiri Olsa }, 1457*945aea22SJiri Olsa { 1458*945aea22SJiri Olsa .func = NULL, 1459*945aea22SJiri Olsa }, 1460*945aea22SJiri Olsa }; 1461*945aea22SJiri Olsa 1462*945aea22SJiri Olsa static bool perf_test__matches(int curr, int argc, const char *argv[]) 1463*945aea22SJiri Olsa { 1464*945aea22SJiri Olsa int i; 1465*945aea22SJiri Olsa 1466*945aea22SJiri Olsa if (argc == 0) 1467*945aea22SJiri Olsa return true; 1468*945aea22SJiri Olsa 1469*945aea22SJiri Olsa for (i = 0; i < argc; ++i) { 1470*945aea22SJiri Olsa char *end; 1471*945aea22SJiri Olsa long nr = strtoul(argv[i], &end, 10); 1472*945aea22SJiri Olsa 1473*945aea22SJiri Olsa if (*end == '\0') { 1474*945aea22SJiri Olsa if (nr == curr + 1) 1475*945aea22SJiri Olsa return true; 1476*945aea22SJiri Olsa continue; 1477*945aea22SJiri Olsa } 1478*945aea22SJiri Olsa 1479*945aea22SJiri Olsa if (strstr(tests[curr].desc, argv[i])) 1480*945aea22SJiri Olsa return true; 1481*945aea22SJiri Olsa } 1482*945aea22SJiri Olsa 1483*945aea22SJiri Olsa return false; 1484*945aea22SJiri Olsa } 1485*945aea22SJiri Olsa 1486*945aea22SJiri Olsa static int __cmd_test(int argc, const char *argv[]) 1487*945aea22SJiri Olsa { 1488*945aea22SJiri Olsa int i = 0; 1489*945aea22SJiri Olsa int width = 0; 1490*945aea22SJiri Olsa 1491*945aea22SJiri Olsa while (tests[i].func) { 1492*945aea22SJiri Olsa int len = strlen(tests[i].desc); 1493*945aea22SJiri Olsa 1494*945aea22SJiri Olsa if (width < len) 1495*945aea22SJiri Olsa width = len; 1496*945aea22SJiri Olsa ++i; 1497*945aea22SJiri Olsa } 1498*945aea22SJiri Olsa 1499*945aea22SJiri Olsa i = 0; 1500*945aea22SJiri Olsa while (tests[i].func) { 1501*945aea22SJiri Olsa int curr = i++, err; 1502*945aea22SJiri Olsa 1503*945aea22SJiri Olsa if (!perf_test__matches(curr, argc, argv)) 1504*945aea22SJiri Olsa continue; 1505*945aea22SJiri Olsa 1506*945aea22SJiri Olsa pr_info("%2d: %-*s:", i, width, tests[curr].desc); 1507*945aea22SJiri Olsa pr_debug("\n--- start ---\n"); 1508*945aea22SJiri Olsa err = tests[curr].func(); 1509*945aea22SJiri Olsa pr_debug("---- end ----\n%s:", tests[curr].desc); 1510*945aea22SJiri Olsa if (err) 1511*945aea22SJiri Olsa color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); 1512*945aea22SJiri Olsa else 1513*945aea22SJiri Olsa pr_info(" Ok\n"); 1514*945aea22SJiri Olsa } 1515*945aea22SJiri Olsa 1516*945aea22SJiri Olsa return 0; 1517*945aea22SJiri Olsa } 1518*945aea22SJiri Olsa 1519*945aea22SJiri Olsa static int perf_test__list(int argc, const char **argv) 1520*945aea22SJiri Olsa { 1521*945aea22SJiri Olsa int i = 0; 1522*945aea22SJiri Olsa 1523*945aea22SJiri Olsa while (tests[i].func) { 1524*945aea22SJiri Olsa int curr = i++; 1525*945aea22SJiri Olsa 1526*945aea22SJiri Olsa if (argc > 1 && !strstr(tests[curr].desc, argv[1])) 1527*945aea22SJiri Olsa continue; 1528*945aea22SJiri Olsa 1529*945aea22SJiri Olsa pr_info("%2d: %s\n", i, tests[curr].desc); 1530*945aea22SJiri Olsa } 1531*945aea22SJiri Olsa 1532*945aea22SJiri Olsa return 0; 1533*945aea22SJiri Olsa } 1534*945aea22SJiri Olsa 1535*945aea22SJiri Olsa int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) 1536*945aea22SJiri Olsa { 1537*945aea22SJiri Olsa const char * const test_usage[] = { 1538*945aea22SJiri Olsa "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", 1539*945aea22SJiri Olsa NULL, 1540*945aea22SJiri Olsa }; 1541*945aea22SJiri Olsa const struct option test_options[] = { 1542*945aea22SJiri Olsa OPT_INCR('v', "verbose", &verbose, 1543*945aea22SJiri Olsa "be more verbose (show symbol address, etc)"), 1544*945aea22SJiri Olsa OPT_END() 1545*945aea22SJiri Olsa }; 1546*945aea22SJiri Olsa 1547*945aea22SJiri Olsa argc = parse_options(argc, argv, test_options, test_usage, 0); 1548*945aea22SJiri Olsa if (argc >= 1 && !strcmp(argv[0], "list")) 1549*945aea22SJiri Olsa return perf_test__list(argc, argv); 1550*945aea22SJiri Olsa 1551*945aea22SJiri Olsa symbol_conf.priv_size = sizeof(int); 1552*945aea22SJiri Olsa symbol_conf.sort_by_name = true; 1553*945aea22SJiri Olsa symbol_conf.try_vmlinux_path = true; 1554*945aea22SJiri Olsa 1555*945aea22SJiri Olsa if (symbol__init() < 0) 1556*945aea22SJiri Olsa return -1; 1557*945aea22SJiri Olsa 1558*945aea22SJiri Olsa return __cmd_test(argc, argv); 1559*945aea22SJiri Olsa } 1560