// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CLR_() macros */
#include <pthread.h>
#include <stdlib.h>
#include <perf/cpumap.h>

#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

/*
 * This test generates a random number of calls to a few getpid-like
 * syscalls (getsid, getppid, getpgid), then establishes an mmap for a
 * group of events that are created to monitor those syscalls.
 *
 * It receives the events via the mmap ring buffer and uses the
 * PERF_SAMPLE_ID generated sample.id field to map each sample back to
 * its respective evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_thread_map *threads;
	struct perf_cpu_map *cpus;
	struct evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct evsel *evsels[nsyscalls], *evsel;
	char sbuf[STRERR_BUFSIZE];
	struct mmap *md;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
			 cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_free_cpus;
	}

	evlist = evlist__new();
	if (evlist == NULL) {
		pr_debug("evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("evsel__newtp(%s)\n", name);
			goto out_delete_evlist;
		}

		evsels[i]->core.attr.wakeup_events = 1;
		evsel__set_sample_id(evsels[i], false);

		evlist__add(evlist, evsels[i]);

		if (evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

	if (evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Make the expected number of calls to each monitored syscall. */
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		struct perf_sample sample;

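		/*
		 * Only PERF_RECORD_SAMPLE records are expected here: each one
		 * carries the PERF_SAMPLE_ID generated sample.id used below to
		 * map the sample back to the evsel that produced it.
		 */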
		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

out_init:
	err = 0;
	evlist__for_each_entry(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	evlist__delete(evlist);
	cpus = NULL;
	threads = NULL;
out_free_cpus:
	perf_cpu_map__put(cpus);
out_free_threads:
	perf_thread_map__put(threads);
	return err;
}