// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>


unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#ifdef CONFIG_PPC_FSL_BOOK3E
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec)
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
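/*
 * Usage sketch, for illustration only: booting with "nospectre_v1" on the
 * kernel command line sets no_nospec above, so setup_barrier_nospec() leaves
 * the speculation barrier unpatched even if the firmware-reported security
 * features would otherwise request it.
 */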
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_PPC_FSL_BOOK3E
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
void setup_spectre_v2(void)
{
	if (no_spectrev2)
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush || thread_priv) {
		struct seq_buf s;
		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: ");

		if (rfi_flush)
			seq_buf_printf(&s, "RFI Flush");

		if (rfi_flush && thread_priv)
			seq_buf_printf(&s, ", ");

		if (thread_priv)
			seq_buf_printf(&s, "L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else {
		seq_buf_printf(&s, "Not affected\n");
	}

	return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
		bool comma = false;
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs) {
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
			comma = true;
		}

		if (ccd) {
			if (comma)
				seq_buf_printf(&s, ", ");
			seq_buf_printf(&s, "Indirect branch cache disabled");
			comma = true;
		}

		/* Only report the software flush if it is actually enabled */
		if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
			if (comma)
				seq_buf_printf(&s, ", ");

			seq_buf_printf(&s, "Software count cache flush");

			if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
				seq_buf_printf(&s, " (hardware accelerated)");
		}
	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}
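/*
 * The cpu_show_*() handlers above back the generic sysfs vulnerability
 * files. Illustrative reads only; the exact strings depend on the features
 * the firmware/hypervisor reports on a given machine:
 *
 *   # cat /sys/devices/system/cpu/vulnerabilities/meltdown
 *   Mitigation: RFI Flush
 *   # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
 *   Mitigation: __user pointer sanitization
 *   # cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *   Mitigation: Indirect branch cache disabled, Software count cache flush
 */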
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.\n");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	}

	return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		(security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier)
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;
		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
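/*
 * Illustrative output, assuming an ARCH_300 (POWER9 class) CPU where the
 * eieio barrier was selected and enabled (example only):
 *
 *   # cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 *   Mitigation: Kernel entry/exit barrier (eieio)
 */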
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

static void toggle_count_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
		pr_info("count-cache-flush: software flush disabled.\n");
		return;
	}

	patch_branch_site(&patch__call_flush_count_cache,
			  (u64)&flush_count_cache, BRANCH_SET_LINK);

	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
		pr_info("count-cache-flush: full software flush sequence enabled.\n");
		return;
	}

	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
	pr_info("count-cache-flush: hardware assisted flush sequence enabled.\n");
}

void setup_count_cache_flush(void)
{
	toggle_count_cache_flush(true);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_count_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
			    NULL, &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */
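/*
 * Debugfs usage sketch (assumes debugfs is mounted at /sys/kernel/debug;
 * powerpc_debugfs_root is the "powerpc" directory beneath it):
 *
 *   # echo 0 > /sys/kernel/debug/powerpc/stf_barrier
 *   # echo 1 > /sys/kernel/debug/powerpc/count_cache_flush
 *
 * Writes flow through stf_barrier_set() / count_cache_flush_set() above and
 * re-patch the affected call/return sites at runtime.
 */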