1 // SPDX-License-Identifier: GPL-2.0+ 2 // 3 // Security related flags and so on. 4 // 5 // Copyright 2018, Michael Ellerman, IBM Corporation. 6 7 #include <linux/cpu.h> 8 #include <linux/kernel.h> 9 #include <linux/device.h> 10 #include <linux/seq_buf.h> 11 12 #include <asm/asm-prototypes.h> 13 #include <asm/code-patching.h> 14 #include <asm/debugfs.h> 15 #include <asm/security_features.h> 16 #include <asm/setup.h> 17 18 19 u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; 20 21 enum count_cache_flush_type { 22 COUNT_CACHE_FLUSH_NONE = 0x1, 23 COUNT_CACHE_FLUSH_SW = 0x2, 24 COUNT_CACHE_FLUSH_HW = 0x4, 25 }; 26 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; 27 static bool link_stack_flush_enabled; 28 29 bool barrier_nospec_enabled; 30 static bool no_nospec; 31 static bool btb_flush_enabled; 32 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 33 static bool no_spectrev2; 34 #endif 35 36 static void enable_barrier_nospec(bool enable) 37 { 38 barrier_nospec_enabled = enable; 39 do_barrier_nospec_fixups(enable); 40 } 41 42 void setup_barrier_nospec(void) 43 { 44 bool enable; 45 46 /* 47 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. 48 * But there's a good reason not to. The two flags we check below are 49 * both are enabled by default in the kernel, so if the hcall is not 50 * functional they will be enabled. 51 * On a system where the host firmware has been updated (so the ori 52 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has 53 * not been updated, we would like to enable the barrier. Dropping the 54 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is 55 * we potentially enable the barrier on systems where the host firmware 56 * is not updated, but that's harmless as it's a no-op. 57 */ 58 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 59 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); 60 61 if (!no_nospec && !cpu_mitigations_off()) 62 enable_barrier_nospec(enable); 63 } 64 65 static int __init handle_nospectre_v1(char *p) 66 { 67 no_nospec = true; 68 69 return 0; 70 } 71 early_param("nospectre_v1", handle_nospectre_v1); 72 73 #ifdef CONFIG_DEBUG_FS 74 static int barrier_nospec_set(void *data, u64 val) 75 { 76 switch (val) { 77 case 0: 78 case 1: 79 break; 80 default: 81 return -EINVAL; 82 } 83 84 if (!!val == !!barrier_nospec_enabled) 85 return 0; 86 87 enable_barrier_nospec(!!val); 88 89 return 0; 90 } 91 92 static int barrier_nospec_get(void *data, u64 *val) 93 { 94 *val = barrier_nospec_enabled ? 1 : 0; 95 return 0; 96 } 97 98 DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get, 99 barrier_nospec_set, "%llu\n"); 100 101 static __init int barrier_nospec_debugfs_init(void) 102 { 103 debugfs_create_file_unsafe("barrier_nospec", 0600, 104 powerpc_debugfs_root, NULL, 105 &fops_barrier_nospec); 106 return 0; 107 } 108 device_initcall(barrier_nospec_debugfs_init); 109 110 static __init int security_feature_debugfs_init(void) 111 { 112 debugfs_create_x64("security_features", 0400, powerpc_debugfs_root, 113 &powerpc_security_features); 114 return 0; 115 } 116 device_initcall(security_feature_debugfs_init); 117 #endif /* CONFIG_DEBUG_FS */ 118 119 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 120 static int __init handle_nospectre_v2(char *p) 121 { 122 no_spectrev2 = true; 123 124 return 0; 125 } 126 early_param("nospectre_v2", handle_nospectre_v2); 127 #endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ 128 129 #ifdef CONFIG_PPC_FSL_BOOK3E 130 void setup_spectre_v2(void) 131 { 132 if (no_spectrev2 || cpu_mitigations_off()) 133 do_btb_flush_fixups(); 134 else 135 btb_flush_enabled = true; 136 } 137 #endif /* CONFIG_PPC_FSL_BOOK3E */ 138 139 #ifdef CONFIG_PPC_BOOK3S_64 140 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 141 { 142 bool thread_priv; 143 144 thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); 145 146 if (rfi_flush) { 147 struct seq_buf s; 148 seq_buf_init(&s, buf, PAGE_SIZE - 1); 149 150 seq_buf_printf(&s, "Mitigation: RFI Flush"); 151 if (thread_priv) 152 seq_buf_printf(&s, ", L1D private per thread"); 153 154 seq_buf_printf(&s, "\n"); 155 156 return s.len; 157 } 158 159 if (thread_priv) 160 return sprintf(buf, "Vulnerable: L1D private per thread\n"); 161 162 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 163 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 164 return sprintf(buf, "Not affected\n"); 165 166 return sprintf(buf, "Vulnerable\n"); 167 } 168 169 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 170 { 171 return cpu_show_meltdown(dev, attr, buf); 172 } 173 #endif 174 175 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 176 { 177 struct seq_buf s; 178 179 seq_buf_init(&s, buf, PAGE_SIZE - 1); 180 181 if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { 182 if (barrier_nospec_enabled) 183 seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); 184 else 185 seq_buf_printf(&s, "Vulnerable"); 186 187 if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) 188 seq_buf_printf(&s, ", ori31 speculation barrier enabled"); 189 190 seq_buf_printf(&s, "\n"); 191 } else 192 seq_buf_printf(&s, "Not affected\n"); 193 194 return s.len; 195 } 196 197 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 198 { 199 struct seq_buf s; 200 bool bcs, ccd; 201 202 seq_buf_init(&s, buf, PAGE_SIZE - 1); 203 204 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); 205 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); 206 207 if (bcs || ccd) { 208 seq_buf_printf(&s, "Mitigation: "); 209 210 if (bcs) 211 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); 212 213 if (bcs && ccd) 214 seq_buf_printf(&s, ", "); 215 216 if (ccd) 217 seq_buf_printf(&s, "Indirect branch cache disabled"); 218 219 if (link_stack_flush_enabled) 220 seq_buf_printf(&s, ", Software link stack flush"); 221 222 } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { 223 seq_buf_printf(&s, "Mitigation: Software count cache flush"); 224 225 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) 226 seq_buf_printf(&s, " (hardware accelerated)"); 227 228 if (link_stack_flush_enabled) 229 seq_buf_printf(&s, ", Software link stack flush"); 230 231 } else if (btb_flush_enabled) { 232 seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); 233 } else { 234 seq_buf_printf(&s, "Vulnerable"); 235 } 236 237 seq_buf_printf(&s, "\n"); 238 239 return s.len; 240 } 241 242 #ifdef CONFIG_PPC_BOOK3S_64 243 /* 244 * Store-forwarding barrier support. 245 */ 246 247 static enum stf_barrier_type stf_enabled_flush_types; 248 static bool no_stf_barrier; 249 bool stf_barrier; 250 251 static int __init handle_no_stf_barrier(char *p) 252 { 253 pr_info("stf-barrier: disabled on command line."); 254 no_stf_barrier = true; 255 return 0; 256 } 257 258 early_param("no_stf_barrier", handle_no_stf_barrier); 259 260 /* This is the generic flag used by other architectures */ 261 static int __init handle_ssbd(char *p) 262 { 263 if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { 264 /* Until firmware tells us, we have the barrier with auto */ 265 return 0; 266 } else if (strncmp(p, "off", 3) == 0) { 267 handle_no_stf_barrier(NULL); 268 return 0; 269 } else 270 return 1; 271 272 return 0; 273 } 274 early_param("spec_store_bypass_disable", handle_ssbd); 275 276 /* This is the generic flag used by other architectures */ 277 static int __init handle_no_ssbd(char *p) 278 { 279 handle_no_stf_barrier(NULL); 280 return 0; 281 } 282 early_param("nospec_store_bypass_disable", handle_no_ssbd); 283 284 static void stf_barrier_enable(bool enable) 285 { 286 if (enable) 287 do_stf_barrier_fixups(stf_enabled_flush_types); 288 else 289 do_stf_barrier_fixups(STF_BARRIER_NONE); 290 291 stf_barrier = enable; 292 } 293 294 void setup_stf_barrier(void) 295 { 296 enum stf_barrier_type type; 297 bool enable, hv; 298 299 hv = cpu_has_feature(CPU_FTR_HVMODE); 300 301 /* Default to fallback in case fw-features are not available */ 302 if (cpu_has_feature(CPU_FTR_ARCH_300)) 303 type = STF_BARRIER_EIEIO; 304 else if (cpu_has_feature(CPU_FTR_ARCH_207S)) 305 type = STF_BARRIER_SYNC_ORI; 306 else if (cpu_has_feature(CPU_FTR_ARCH_206)) 307 type = STF_BARRIER_FALLBACK; 308 else 309 type = STF_BARRIER_NONE; 310 311 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 312 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || 313 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); 314 315 if (type == STF_BARRIER_FALLBACK) { 316 pr_info("stf-barrier: fallback barrier available\n"); 317 } else if (type == STF_BARRIER_SYNC_ORI) { 318 pr_info("stf-barrier: hwsync barrier available\n"); 319 } else if (type == STF_BARRIER_EIEIO) { 320 pr_info("stf-barrier: eieio barrier available\n"); 321 } 322 323 stf_enabled_flush_types = type; 324 325 if (!no_stf_barrier && !cpu_mitigations_off()) 326 stf_barrier_enable(enable); 327 } 328 329 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 330 { 331 if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { 332 const char *type; 333 switch (stf_enabled_flush_types) { 334 case STF_BARRIER_EIEIO: 335 type = "eieio"; 336 break; 337 case STF_BARRIER_SYNC_ORI: 338 type = "hwsync"; 339 break; 340 case STF_BARRIER_FALLBACK: 341 type = "fallback"; 342 break; 343 default: 344 type = "unknown"; 345 } 346 return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); 347 } 348 349 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 350 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 351 return sprintf(buf, "Not affected\n"); 352 353 return sprintf(buf, "Vulnerable\n"); 354 } 355 356 #ifdef CONFIG_DEBUG_FS 357 static int stf_barrier_set(void *data, u64 val) 358 { 359 bool enable; 360 361 if (val == 1) 362 enable = true; 363 else if (val == 0) 364 enable = false; 365 else 366 return -EINVAL; 367 368 /* Only do anything if we're changing state */ 369 if (enable != stf_barrier) 370 stf_barrier_enable(enable); 371 372 return 0; 373 } 374 375 static int stf_barrier_get(void *data, u64 *val) 376 { 377 *val = stf_barrier ? 1 : 0; 378 return 0; 379 } 380 381 DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, 382 "%llu\n"); 383 384 static __init int stf_barrier_debugfs_init(void) 385 { 386 debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root, 387 NULL, &fops_stf_barrier); 388 return 0; 389 } 390 device_initcall(stf_barrier_debugfs_init); 391 #endif /* CONFIG_DEBUG_FS */ 392 393 static void no_count_cache_flush(void) 394 { 395 count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; 396 pr_info("count-cache-flush: software flush disabled.\n"); 397 } 398 399 static void toggle_count_cache_flush(bool enable) 400 { 401 if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && 402 !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) 403 enable = false; 404 405 if (!enable) { 406 patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); 407 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 408 patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); 409 #endif 410 pr_info("link-stack-flush: software flush disabled.\n"); 411 link_stack_flush_enabled = false; 412 no_count_cache_flush(); 413 return; 414 } 415 416 // This enables the branch from _switch to flush_count_cache 417 patch_branch_site(&patch__call_flush_count_cache, 418 (u64)&flush_count_cache, BRANCH_SET_LINK); 419 420 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 421 // This enables the branch from guest_exit_cont to kvm_flush_link_stack 422 patch_branch_site(&patch__call_kvm_flush_link_stack, 423 (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); 424 #endif 425 426 pr_info("link-stack-flush: software flush enabled.\n"); 427 link_stack_flush_enabled = true; 428 429 // If we just need to flush the link stack, patch an early return 430 if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { 431 patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); 432 no_count_cache_flush(); 433 return; 434 } 435 436 if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { 437 count_cache_flush_type = COUNT_CACHE_FLUSH_SW; 438 pr_info("count-cache-flush: full software flush sequence enabled.\n"); 439 return; 440 } 441 442 patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); 443 count_cache_flush_type = COUNT_CACHE_FLUSH_HW; 444 pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); 445 } 446 447 void setup_count_cache_flush(void) 448 { 449 bool enable = true; 450 451 if (no_spectrev2 || cpu_mitigations_off()) { 452 if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || 453 security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) 454 pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); 455 456 enable = false; 457 } 458 459 /* 460 * There's no firmware feature flag/hypervisor bit to tell us we need to 461 * flush the link stack on context switch. So we set it here if we see 462 * either of the Spectre v2 mitigations that aim to protect userspace. 463 */ 464 if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || 465 security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) 466 security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); 467 468 toggle_count_cache_flush(enable); 469 } 470 471 #ifdef CONFIG_DEBUG_FS 472 static int count_cache_flush_set(void *data, u64 val) 473 { 474 bool enable; 475 476 if (val == 1) 477 enable = true; 478 else if (val == 0) 479 enable = false; 480 else 481 return -EINVAL; 482 483 toggle_count_cache_flush(enable); 484 485 return 0; 486 } 487 488 static int count_cache_flush_get(void *data, u64 *val) 489 { 490 if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE) 491 *val = 0; 492 else 493 *val = 1; 494 495 return 0; 496 } 497 498 DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, 499 count_cache_flush_set, "%llu\n"); 500 501 static __init int count_cache_flush_debugfs_init(void) 502 { 503 debugfs_create_file_unsafe("count_cache_flush", 0600, 504 powerpc_debugfs_root, NULL, 505 &fops_count_cache_flush); 506 return 0; 507 } 508 device_initcall(count_cache_flush_debugfs_init); 509 #endif /* CONFIG_DEBUG_FS */ 510 #endif /* CONFIG_PPC_BOOK3S_64 */ 511