// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
#include "pmu-hybrid.h"

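/*
 * Error-check helpers: log the stringified expression and bail out to the
 * common cleanup path.  The while loops run at most once because of the
 * goto, so they behave like plain if statements.
 */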
#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes
 * %0 is returned, otherwise %-1 is returned.  If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.
 */
int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.target		     = {
			.uses_mmap   = true,
		},
		.sample_time	     = true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

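	/* Profile just this process, on all online CPUs. */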
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	evlist__config(evlist, &opts, NULL);

	evsel = evlist__first(evlist);

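	/* Ask for COMM events and keep the event disabled until evlist__enable(). */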
	evsel->core.attr.comm = 1;
	evsel->core.attr.disabled = 1;
	evsel->core.attr.enable_on_exec = 0;

	/*
	 * For hybrid "cycles:u", parse_events() creates two events.
	 * Initialize the second evsel the same way.
	 */
	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
		evsel = evsel__next(evsel);
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

	CHECK__(evlist__open(evlist));

	CHECK__(evlist__mmap(evlist, UINT_MAX));

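	/*
	 * The kernel publishes the TSC conversion parameters (time_mult,
	 * time_shift, time_zero) in the perf_event_mmap_page at the base of
	 * the first ring buffer.
	 */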
	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			return 0;
		}
		goto out_err;
	}

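	/*
	 * Generate two COMM events with an rdtsc() in between: the TSC value
	 * must convert to a perf time that falls between their timestamps.
	 */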
	evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

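	/* Walk every ring buffer and pick out this process's two COMM events. */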
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

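	/*
	 * Convert in both directions.  Per the perf_event_mmap_page ABI, the
	 * cycles-to-time mapping is roughly:
	 *
	 *   time = time_zero + (tsc * time_mult) >> time_shift
	 *
	 * (computed in two halves to avoid 64-bit overflow), and
	 * perf_time_to_tsc() applies the inverse.
	 */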
	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc          time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

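	/*
	 * The rdtsc() happened strictly between the two prctl() calls, so in
	 * both time bases it must order strictly between the two COMM events.
	 */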
	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

bool test__tsc_is_supported(void)
{
	/*
	 * TSC conversion in perf is supported only on x86_64/i386 and Arm64,
	 * so enable the test just for those architectures.
	 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
	return true;
#else
	return false;
#endif
}