// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright 2018-2019 IBM Corporation.
 */

#define __SANE_USERSPACE_TYPES__

#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/prctl.h>
#include "utils.h"

#include "../pmu/event.h"

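/*
 * Branch-heavy loops used to exercise the branch predictors; they are
 * defined elsewhere in this test's sources.
 */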
extern void pattern_cache_loop(void);
extern void indirect_branch_loop(void);

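/*
 * Run one of the branch loops with the PMU counters enabled and compute the
 * overall branch misprediction rate. On P9 the pattern cache events are
 * included as well as the count cache events.
 */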
static int do_count_loop(struct event *events, bool is_p9, s64 *miss_percent)
{
	u64 pred, mpred;

	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	if (is_p9)
		pattern_cache_loop();
	else
		indirect_branch_loop();

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	event_read(&events[0]);
	event_read(&events[1]);

	// We could scale all the events by running/enabled, but we're lazy.
	// As long as the PMU is uncontended they should all run the whole time.
	FAIL_IF(events[0].result.running != events[0].result.enabled);
	FAIL_IF(events[1].result.running != events[1].result.enabled);

	pred =  events[0].result.value;
	mpred = events[1].result.value;

	if (is_p9) {
		event_read(&events[2]);
		event_read(&events[3]);
		FAIL_IF(events[2].result.running != events[2].result.enabled);
		FAIL_IF(events[3].result.running != events[3].result.enabled);

		pred  += events[2].result.value;
		mpred += events[3].result.value;
	}

	*miss_percent = 100 * mpred / pred;

	return 0;
}

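// Configure an event to count userspace only, starting disabled so the whole
// group only counts while the loop runs (enabled/disabled via prctl())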
static void setup_event(struct event *e, u64 config, char *name)
{
	event_init_named(e, config, name);

	e->attr.disabled = 1;
	e->attr.exclude_kernel = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_idle = 1;
}

enum spectre_v2_state {
	VULNERABLE = 0,
	UNKNOWN = 1,		// Works with FAIL_IF()
	NOT_AFFECTED,
	BRANCH_SERIALISATION,
	COUNT_CACHE_DISABLED,
	COUNT_CACHE_FLUSH_SW,
	COUNT_CACHE_FLUSH_HW,
	BTB_FLUSH,
};

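// Map the mitigation string reported by sysfs onto a spectre_v2_state value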
static enum spectre_v2_state get_sysfs_state(void)
{
	enum spectre_v2_state state = UNKNOWN;
	char buf[256];
	int len;

	memset(buf, 0, sizeof(buf));
	FAIL_IF(read_sysfs_file("devices/system/cpu/vulnerabilities/spectre_v2", buf, sizeof(buf)));

	// Make sure it's NUL terminated
	buf[sizeof(buf) - 1] = '\0';

	// Trim the trailing newline
	len = strlen(buf);
	FAIL_IF(len < 1);
	buf[len - 1] = '\0';

	printf("sysfs reports: '%s'\n", buf);

	// Order matters, some strings are substrings of others
	if (strstr(buf, "Vulnerable"))
		state = VULNERABLE;
	else if (strstr(buf, "Not affected"))
		state = NOT_AFFECTED;
	else if (strstr(buf, "Indirect branch serialisation (kernel only)"))
		state = BRANCH_SERIALISATION;
	else if (strstr(buf, "Indirect branch cache disabled"))
		state = COUNT_CACHE_DISABLED;
	else if (strstr(buf, "Software count cache flush (hardware accelerated)"))
		state = COUNT_CACHE_FLUSH_HW;
	else if (strstr(buf, "Software count cache flush"))
		state = COUNT_CACHE_FLUSH_SW;
	else if (strstr(buf, "Branch predictor state flush"))
		state = BTB_FLUSH;

	return state;
}

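// Raw PMU event codes for count cache / pattern cache predictions and mispredictions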
#define PM_BR_PRED_CCACHE	0x040a4	// P8 + P9
#define PM_BR_MPRED_CCACHE	0x040ac	// P8 + P9
#define PM_BR_PRED_PCACHE	0x048a0	// P9 only
#define PM_BR_MPRED_PCACHE	0x048b0	// P9 only

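/*
 * Measure the branch misprediction rate of a branch-heavy loop and check that
 * it is consistent with the Spectre v2 mitigation reported via sysfs. For
 * example, if the count cache is disabled nearly every branch should miss,
 * while a flush-based mitigation should leave userspace prediction rates
 * largely untouched.
 */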
int spectre_v2_test(void)
{
	enum spectre_v2_state state;
	struct event events[4];
	s64 miss_percent;
	bool is_p9;

	// The PMU events we use only work on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	state = get_sysfs_state();
	if (state == UNKNOWN) {
		printf("Error: couldn't determine spectre_v2 mitigation state?\n");
		return -1;
	}

	memset(events, 0, sizeof(events));

	setup_event(&events[0], PM_BR_PRED_CCACHE,  "PM_BR_PRED_CCACHE");
	setup_event(&events[1], PM_BR_MPRED_CCACHE, "PM_BR_MPRED_CCACHE");
	FAIL_IF(event_open(&events[0]));
	FAIL_IF(event_open_with_group(&events[1], events[0].fd) == -1);

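	// The upper 16 bits of the PVR are the processor version; 0x4e is POWER9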
	is_p9 = ((mfspr(SPRN_PVR) >> 16) & 0xFFFF) == 0x4e;

	if (is_p9) {
		// Count pattern cache too
		setup_event(&events[2], PM_BR_PRED_PCACHE,  "PM_BR_PRED_PCACHE");
		setup_event(&events[3], PM_BR_MPRED_PCACHE, "PM_BR_MPRED_PCACHE");

		FAIL_IF(event_open_with_group(&events[2], events[0].fd) == -1);
		FAIL_IF(event_open_with_group(&events[3], events[0].fd) == -1);
	}

	FAIL_IF(do_count_loop(events, is_p9, &miss_percent));

	event_report_justified(&events[0], 18, 10);
	event_report_justified(&events[1], 18, 10);
	event_close(&events[0]);
	event_close(&events[1]);

	if (is_p9) {
		event_report_justified(&events[2], 18, 10);
		event_report_justified(&events[3], 18, 10);
		event_close(&events[2]);
		event_close(&events[3]);
	}

	printf("Miss percent %lld %%\n", miss_percent);

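	// Check the measured miss rate is plausible for the reported mitigation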
	switch (state) {
	case VULNERABLE:
	case NOT_AFFECTED:
	case COUNT_CACHE_FLUSH_SW:
	case COUNT_CACHE_FLUSH_HW:
		// None of these should affect userspace branch prediction
		if (miss_percent > 15) {
			if (miss_percent > 95) {
				/*
				 * Such a mismatch may be caused by a system being unaware
				 * the count cache is disabled. This may be to enable
				 * guest migration between hosts with different settings.
				 * Return the skip code to avoid reporting this as an error.
				 * In this case the system is not actually vulnerable, even
				 * though it reports otherwise, so missing such a mismatch
				 * is safe.
				 */
				printf("Branch misses > 95%% unexpected in this configuration.\n");
				printf("Count cache likely disabled without Linux knowing.\n");
				if (state == COUNT_CACHE_FLUSH_SW)
					printf("WARNING: Kernel performing unnecessary flushes.\n");
				return 4;
			}
			printf("Branch misses > 15%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");

			return 1;
		}
		break;
	case BRANCH_SERIALISATION:
		// This seems to affect userspace branch prediction a bit?
		if (miss_percent > 25) {
			printf("Branch misses > 25%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case COUNT_CACHE_DISABLED:
		if (miss_percent < 95) {
			printf("Branch misses < 95%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case UNKNOWN:
	case BTB_FLUSH:
		printf("Not sure!\n");
		return 1;
	}

	printf("OK - Measured branch prediction rates match reported spectre v2 mitigation.\n");

	return 0;
}

int main(int argc, char *argv[])
{
	return test_harness(spectre_v2_test, "spectre_v2");
}