// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
#include "pmu-hybrid.h"

/*
 * TSC conversion in perf is only supported on x86_64/i386 and Arm64, so only
 * enable this test on those architectures.
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
#define TSC_IS_SUPPORTED 1
#else
#define TSC_IS_SUPPORTED 0
#endif

#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes
 * %0 is returned, otherwise %-1 is returned.  If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.  On
 * architectures without TSC support the test is skipped.
 */
static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.mmap_pages	= UINT_MAX,
		.user_freq	= UINT_MAX,
		.user_interval	= ULLONG_MAX,
		.target		= {
			.uses_mmap = true,
		},
		.sample_time	= true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

	if (!TSC_IS_SUPPORTED) {
		pr_debug("Test not supported on this architecture\n");
		return TEST_SKIP;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	evlist__config(evlist, &opts, NULL);

	evsel = evlist__first(evlist);

	evsel->core.attr.comm = 1;
	evsel->core.attr.disabled = 1;
	evsel->core.attr.enable_on_exec = 0;

	/*
	 * For hybrid "cycles:u", parse_events() creates two events.
	 * Initialize the second evsel here as well.
	 */
	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
		evsel = evsel__next(evsel);
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

	CHECK__(evlist__open(evlist));

	CHECK__(evlist__mmap(evlist, UINT_MAX));

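	/*
	 * The kernel publishes the TSC conversion parameters (time_shift,
	 * time_mult, time_zero) in the first mmapped event page; see the
	 * cap_user_time documentation in perf_event.h.  Conceptually,
	 * tsc_to_perf_time() computes:
	 *
	 *   time = time_zero + (tsc * time_mult) >> time_shift
	 *
	 * split into quotient and remainder to avoid 64-bit overflow.
	 * perf_read_tsc_conversion() returns -EOPNOTSUPP when the kernel
	 * does not advertise cap_user_time_zero for this mmap.
	 */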
	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			err = 0;
			goto out_err;
		}
		goto out_err;
	}

	evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

DEFINE_SUITE("Convert perf time to TSC", perf_time_to_tsc);
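
/*
 * A typical way to run just this test from a built perf binary, matching by
 * name since the test number varies between versions:
 *
 *   $ perf test -v "Convert perf time to TSC"
 */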