1 #include "evlist.h" 2 #include "evsel.h" 3 #include "thread_map.h" 4 #include "cpumap.h" 5 #include "tests.h" 6 7 /* 8 * This test will generate random numbers of calls to some getpid syscalls, 9 * then establish an mmap for a group of events that are created to monitor 10 * the syscalls. 11 * 12 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated 13 * sample.id field to map back to its respective perf_evsel instance. 14 * 15 * Then it checks if the number of syscalls reported as perf events by 16 * the kernel corresponds to the number of syscalls made. 17 */ 18 int test__basic_mmap(void) 19 { 20 int err = -1; 21 union perf_event *event; 22 struct thread_map *threads; 23 struct cpu_map *cpus; 24 struct perf_evlist *evlist; 25 struct perf_event_attr attr = { 26 .type = PERF_TYPE_TRACEPOINT, 27 .read_format = PERF_FORMAT_ID, 28 .sample_type = PERF_SAMPLE_ID, 29 .watermark = 0, 30 }; 31 cpu_set_t cpu_set; 32 const char *syscall_names[] = { "getsid", "getppid", "getpgrp", 33 "getpgid", }; 34 pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, 35 (void*)getpgid }; 36 #define nsyscalls ARRAY_SIZE(syscall_names) 37 int ids[nsyscalls]; 38 unsigned int nr_events[nsyscalls], 39 expected_nr_events[nsyscalls], i, j; 40 struct perf_evsel *evsels[nsyscalls], *evsel; 41 42 for (i = 0; i < nsyscalls; ++i) { 43 char name[64]; 44 45 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 46 ids[i] = trace_event__id(name); 47 if (ids[i] < 0) { 48 pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); 49 return -1; 50 } 51 nr_events[i] = 0; 52 expected_nr_events[i] = random() % 257; 53 } 54 55 threads = thread_map__new(-1, getpid(), UINT_MAX); 56 if (threads == NULL) { 57 pr_debug("thread_map__new\n"); 58 return -1; 59 } 60 61 cpus = cpu_map__new(NULL); 62 if (cpus == NULL) { 63 pr_debug("cpu_map__new\n"); 64 goto out_free_threads; 65 } 66 67 CPU_ZERO(&cpu_set); 68 CPU_SET(cpus->map[0], &cpu_set); 69 sched_setaffinity(0, sizeof(cpu_set), &cpu_set); 70 if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { 71 pr_debug("sched_setaffinity() failed on CPU %d: %s ", 72 cpus->map[0], strerror(errno)); 73 goto out_free_cpus; 74 } 75 76 evlist = perf_evlist__new(cpus, threads); 77 if (evlist == NULL) { 78 pr_debug("perf_evlist__new\n"); 79 goto out_free_cpus; 80 } 81 82 /* anonymous union fields, can't be initialized above */ 83 attr.wakeup_events = 1; 84 attr.sample_period = 1; 85 86 for (i = 0; i < nsyscalls; ++i) { 87 attr.config = ids[i]; 88 evsels[i] = perf_evsel__new(&attr, i); 89 if (evsels[i] == NULL) { 90 pr_debug("perf_evsel__new\n"); 91 goto out_free_evlist; 92 } 93 94 perf_evlist__add(evlist, evsels[i]); 95 96 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { 97 pr_debug("failed to open counter: %s, " 98 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 99 strerror(errno)); 100 goto out_close_fd; 101 } 102 } 103 104 if (perf_evlist__mmap(evlist, 128, true) < 0) { 105 pr_debug("failed to mmap events: %d (%s)\n", errno, 106 strerror(errno)); 107 goto out_close_fd; 108 } 109 110 for (i = 0; i < nsyscalls; ++i) 111 for (j = 0; j < expected_nr_events[i]; ++j) { 112 int foo = syscalls[i](); 113 ++foo; 114 } 115 116 while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { 117 struct perf_sample sample; 118 119 if (event->header.type != PERF_RECORD_SAMPLE) { 120 pr_debug("unexpected %s event\n", 121 perf_event__name(event->header.type)); 122 goto out_munmap; 123 } 124 125 err = perf_evlist__parse_sample(evlist, event, &sample); 126 if (err) { 127 
pr_err("Can't parse sample, err = %d\n", err); 128 goto out_munmap; 129 } 130 131 evsel = perf_evlist__id2evsel(evlist, sample.id); 132 if (evsel == NULL) { 133 pr_debug("event with id %" PRIu64 134 " doesn't map to an evsel\n", sample.id); 135 goto out_munmap; 136 } 137 nr_events[evsel->idx]++; 138 } 139 140 list_for_each_entry(evsel, &evlist->entries, node) { 141 if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { 142 pr_debug("expected %d %s events, got %d\n", 143 expected_nr_events[evsel->idx], 144 perf_evsel__name(evsel), nr_events[evsel->idx]); 145 goto out_munmap; 146 } 147 } 148 149 err = 0; 150 out_munmap: 151 perf_evlist__munmap(evlist); 152 out_close_fd: 153 for (i = 0; i < nsyscalls; ++i) 154 perf_evsel__close_fd(evsels[i], 1, threads->nr); 155 out_free_evlist: 156 perf_evlist__delete(evlist); 157 out_free_cpus: 158 cpu_map__delete(cpus); 159 out_free_threads: 160 thread_map__delete(threads); 161 return err; 162 } 163