// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright 2018-2019 IBM Corporation.
 */

#define __SANE_USERSPACE_TYPES__

#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/prctl.h>
#include "utils.h"

#include "../pmu/event.h"

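// Branch-heavy loops used to exercise the branch predictor, defined
// separately (in assembly) as part of this selftest.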
extern void pattern_cache_loop(void);
extern void indirect_branch_loop(void);

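/*
 * Enable the task's perf events, run the branch-heavy loop appropriate
 * for this CPU, then read the results back and compute the percentage
 * of branches that were mispredicted.
 */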
static int do_count_loop(struct event *events, bool is_p9, s64 *miss_percent)
{
	u64 pred, mpred;

	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	if (is_p9)
		pattern_cache_loop();
	else
		indirect_branch_loop();

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	event_read(&events[0]);
	event_read(&events[1]);

	// We could scale all the events by running/enabled but we're lazy.
	// As long as the PMU is uncontended they should all run.
	FAIL_IF(events[0].result.running != events[0].result.enabled);
	FAIL_IF(events[1].result.running != events[1].result.enabled);

	pred = events[0].result.value;
	mpred = events[1].result.value;

	if (is_p9) {
		event_read(&events[2]);
		event_read(&events[3]);
		FAIL_IF(events[2].result.running != events[2].result.enabled);
		FAIL_IF(events[3].result.running != events[3].result.enabled);

		pred += events[2].result.value;
		mpred += events[3].result.value;
	}

	*miss_percent = 100 * mpred / pred;

	return 0;
}

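/*
 * Create an event disabled (it's enabled for the whole task via prctl()
 * in do_count_loop()) and counting only non-idle userspace activity.
 */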
static void setup_event(struct event *e, u64 config, char *name)
{
	event_init_named(e, config, name);

	e->attr.disabled = 1;
	e->attr.exclude_kernel = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_idle = 1;
}

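// Possible mitigation states, matching the strings the kernel reports
// in sysfs (see get_sysfs_state() below).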
enum spectre_v2_state {
	VULNERABLE = 0,
	UNKNOWN = 1, // Must be 1, so a FAIL_IF() in get_sysfs_state() maps to UNKNOWN
	NOT_AFFECTED,
	BRANCH_SERIALISATION,
	COUNT_CACHE_DISABLED,
	COUNT_CACHE_FLUSH_SW,
	COUNT_CACHE_FLUSH_HW,
	BTB_FLUSH,
};

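/*
 * Read the spectre_v2 mitigation state the kernel reports in sysfs and
 * map it to an enum spectre_v2_state.
 */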
static enum spectre_v2_state get_sysfs_state(void)
{
	enum spectre_v2_state state = UNKNOWN;
	char buf[256];
	int len;

	memset(buf, 0, sizeof(buf));
	FAIL_IF(read_sysfs_file("devices/system/cpu/vulnerabilities/spectre_v2", buf, sizeof(buf)));

	// Make sure it's NUL terminated
	buf[sizeof(buf) - 1] = '\0';

	// Trim the trailing newline
	len = strlen(buf);
	FAIL_IF(len < 1);
	buf[len - 1] = '\0';

	printf("sysfs reports: '%s'\n", buf);

	// Order matters, eg. the HW accelerated flush string contains the SW flush string
	if (strstr(buf, "Vulnerable"))
		state = VULNERABLE;
	else if (strstr(buf, "Not affected"))
		state = NOT_AFFECTED;
	else if (strstr(buf, "Indirect branch serialisation (kernel only)"))
		state = BRANCH_SERIALISATION;
	else if (strstr(buf, "Indirect branch cache disabled"))
		state = COUNT_CACHE_DISABLED;
	else if (strstr(buf, "Software count cache flush (hardware accelerated)"))
		state = COUNT_CACHE_FLUSH_HW;
	else if (strstr(buf, "Software count cache flush"))
		state = COUNT_CACHE_FLUSH_SW;
	else if (strstr(buf, "Branch predictor state flush"))
		state = BTB_FLUSH;

	return state;
}

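// Raw PMU event codes counting branch predictions and mispredictions
// from the count cache (CCACHE) and, on P9, the pattern cache (PCACHE).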
#define PM_BR_PRED_CCACHE	0x040a4 // P8 + P9
#define PM_BR_MPRED_CCACHE	0x040ac // P8 + P9
#define PM_BR_PRED_PCACHE	0x048a0 // P9 only
#define PM_BR_MPRED_PCACHE	0x048b0 // P9 only

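/*
 * Measure the branch misprediction rate of a branch-heavy loop and check
 * that it is consistent with the spectre_v2 mitigation the kernel reports.
 */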
int spectre_v2_test(void)
{
	enum spectre_v2_state state;
	struct event events[4];
	s64 miss_percent;
	bool is_p9;

	// The PMU events we use only work on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	state = get_sysfs_state();
	if (state == UNKNOWN) {
		printf("Error: couldn't determine spectre_v2 mitigation state\n");
		return -1;
	}

	memset(events, 0, sizeof(events));

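	// Open the count cache events as a group, so they are always
	// scheduled onto the PMU together.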
	setup_event(&events[0], PM_BR_PRED_CCACHE, "PM_BR_PRED_CCACHE");
	setup_event(&events[1], PM_BR_MPRED_CCACHE, "PM_BR_MPRED_CCACHE");
	FAIL_IF(event_open(&events[0]));
	FAIL_IF(event_open_with_group(&events[1], events[0].fd) == -1);

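	// PVR version field (top 16 bits) 0x004e == POWER9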
	is_p9 = ((mfspr(SPRN_PVR) >> 16) & 0xFFFF) == 0x4e;

	if (is_p9) {
		// Count pattern cache too
		setup_event(&events[2], PM_BR_PRED_PCACHE, "PM_BR_PRED_PCACHE");
		setup_event(&events[3], PM_BR_MPRED_PCACHE, "PM_BR_MPRED_PCACHE");

		FAIL_IF(event_open_with_group(&events[2], events[0].fd) == -1);
		FAIL_IF(event_open_with_group(&events[3], events[0].fd) == -1);
	}

	FAIL_IF(do_count_loop(events, is_p9, &miss_percent));

	event_report_justified(&events[0], 18, 10);
	event_report_justified(&events[1], 18, 10);
	event_close(&events[0]);
	event_close(&events[1]);

	if (is_p9) {
		event_report_justified(&events[2], 18, 10);
		event_report_justified(&events[3], 18, 10);
		event_close(&events[2]);
		event_close(&events[3]);
	}

	printf("Miss percent %lld %%\n", miss_percent);

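	// Check the measured miss rate against what we expect for the
	// reported mitigation.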
	switch (state) {
	case VULNERABLE:
	case NOT_AFFECTED:
	case COUNT_CACHE_FLUSH_SW:
	case COUNT_CACHE_FLUSH_HW:
		// These should all not affect userspace branch prediction
		if (miss_percent > 15) {
			if (miss_percent > 95) {
				/*
				 * Such a mismatch may be caused by the kernel
				 * being unaware that the count cache has been
				 * disabled, which may be done to allow guest
				 * migration between hosts with different
				 * settings. Return the skip code so this isn't
				 * reported as an error: the system is not
				 * vulnerable, it is just reporting its state
				 * inaccurately, so ignoring the mismatch is safe.
				 */
				printf("Branch misses > 95%% unexpected in this configuration.\n");
				printf("Count cache likely disabled without Linux knowing.\n");
				if (state == COUNT_CACHE_FLUSH_SW)
					printf("WARNING: Kernel performing unnecessary flushes.\n");
				return 4; // KSFT_SKIP
			}
			printf("Branch misses > 15%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");

			return 1;
		}
		break;
	case BRANCH_SERIALISATION:
		// This seems to affect userspace branch prediction a bit?
		if (miss_percent > 25) {
			printf("Branch misses > 25%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case COUNT_CACHE_DISABLED:
		if (miss_percent < 95) {
			printf("Branch misses < 95%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case UNKNOWN:
	case BTB_FLUSH:
		printf("Unknown or unhandled mitigation state, can't check!\n");
		return 1;
	}

	printf("OK - Measured branch prediction rates match reported spectre v2 mitigation.\n");

	return 0;
}

int main(int argc, char *argv[])
{
	return test_harness(spectre_v2_test, "spectre_v2");
}