// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
#include "pmu-hybrid.h"

/*
 * Except for x86_64/i386 and Arm64, other archs don't support TSC in perf.
 * Only enable the test for x86_64/i386 and Arm64.
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
#define TSC_IS_SUPPORTED 1
#else
#define TSC_IS_SUPPORTED 0
#endif

#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
				  int subtest __maybe_unused)
{
	if (!TSC_IS_SUPPORTED) {
		pr_debug("Test not supported on this architecture\n");
		return TEST_SKIP;
	}

	return TEST_OK;
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes
 * %TEST_OK is returned, otherwise %TEST_FAIL is returned.  If TSC conversion
 * is not supported then %TEST_SKIP is returned.
 */
static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.mmap_pages = UINT_MAX,
		.user_freq = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.target = {
			.uses_mmap = true,
		},
		.sample_time = true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = TEST_FAIL, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	evlist__config(evlist, &opts, NULL);

	evsel = evlist__first(evlist);

	evsel->core.attr.comm = 1;
	evsel->core.attr.disabled = 1;
	evsel->core.attr.enable_on_exec = 0;

	/*
	 * On a hybrid system, "cycles:u" creates two events,
	 * so initialize the second evsel here as well.
	 */
	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
		evsel = evsel__next(evsel);
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

	/* Open the events once; skip the test if they are not available. */
	ret = evlist__open(evlist);
	if (ret < 0) {
		if (ret == -ENOENT)
			err = TEST_SKIP;
		else
			pr_debug("evlist__open() failed\n");
		goto out_err;
	}

	CHECK__(evlist__mmap(evlist, UINT_MAX));

	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
			err = TEST_SKIP;
		}
		goto out_err;
	}

	evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	/* Read the TSC in between the two COMM events. */
	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	/*
	 * The rdtsc() reading was taken between the two COMM events, so it
	 * must fall between them in both the perf time and TSC domains.
	 */
	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = TEST_OK;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

static struct test_case time_to_tsc_tests[] = {
	TEST_CASE_REASON("TSC support", tsc_is_supported,
			 "This architecture does not support TSC"),
	TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
			 "perf_read_tsc_conversion is not supported"),
	{ .name = NULL, }
};

struct test_suite suite__perf_time_to_tsc = {
	.desc = "Convert perf time to TSC",
	.test_cases = time_to_tsc_tests,
};
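/*
 * Illustrative sketch (not part of the test): the conversion exercised above
 * follows the mult/shift/offset scheme exported via perf_event_mmap_page and
 * read by perf_read_tsc_conversion().  The field names below are assumed to
 * match struct perf_tsc_conversion (time_shift, time_mult, time_zero); treat
 * this as a minimal sketch of tsc_to_perf_time(), not a definitive reference:
 *
 *	quot = cyc >> tc->time_shift;
 *	rem  = cyc & (((u64)1 << tc->time_shift) - 1);
 *	time = tc->time_zero + quot * tc->time_mult +
 *	       ((rem * tc->time_mult) >> tc->time_shift);
 *
 * perf_time_to_tsc() applies the inverse mapping, which is why the test can
 * require the rdtsc() reading to land between the two COMM events in both
 * the perf time and TSC domains.
 */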