1 // SPDX-License-Identifier: GPL-2.0+ 2 // 3 // Security related flags and so on. 4 // 5 // Copyright 2018, Michael Ellerman, IBM Corporation. 6 7 #include <linux/cpu.h> 8 #include <linux/kernel.h> 9 #include <linux/device.h> 10 #include <linux/nospec.h> 11 #include <linux/prctl.h> 12 #include <linux/seq_buf.h> 13 14 #include <asm/asm-prototypes.h> 15 #include <asm/code-patching.h> 16 #include <asm/debugfs.h> 17 #include <asm/security_features.h> 18 #include <asm/setup.h> 19 #include <asm/inst.h> 20 21 22 u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; 23 24 enum count_cache_flush_type { 25 COUNT_CACHE_FLUSH_NONE = 0x1, 26 COUNT_CACHE_FLUSH_SW = 0x2, 27 COUNT_CACHE_FLUSH_HW = 0x4, 28 }; 29 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; 30 static bool link_stack_flush_enabled; 31 32 bool barrier_nospec_enabled; 33 static bool no_nospec; 34 static bool btb_flush_enabled; 35 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 36 static bool no_spectrev2; 37 #endif 38 39 static void enable_barrier_nospec(bool enable) 40 { 41 barrier_nospec_enabled = enable; 42 do_barrier_nospec_fixups(enable); 43 } 44 45 void setup_barrier_nospec(void) 46 { 47 bool enable; 48 49 /* 50 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. 51 * But there's a good reason not to. The two flags we check below are 52 * both are enabled by default in the kernel, so if the hcall is not 53 * functional they will be enabled. 54 * On a system where the host firmware has been updated (so the ori 55 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has 56 * not been updated, we would like to enable the barrier. Dropping the 57 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is 58 * we potentially enable the barrier on systems where the host firmware 59 * is not updated, but that's harmless as it's a no-op. 60 */ 61 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 62 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); 63 64 if (!no_nospec && !cpu_mitigations_off()) 65 enable_barrier_nospec(enable); 66 } 67 68 static int __init handle_nospectre_v1(char *p) 69 { 70 no_nospec = true; 71 72 return 0; 73 } 74 early_param("nospectre_v1", handle_nospectre_v1); 75 76 #ifdef CONFIG_DEBUG_FS 77 static int barrier_nospec_set(void *data, u64 val) 78 { 79 switch (val) { 80 case 0: 81 case 1: 82 break; 83 default: 84 return -EINVAL; 85 } 86 87 if (!!val == !!barrier_nospec_enabled) 88 return 0; 89 90 enable_barrier_nospec(!!val); 91 92 return 0; 93 } 94 95 static int barrier_nospec_get(void *data, u64 *val) 96 { 97 *val = barrier_nospec_enabled ? 1 : 0; 98 return 0; 99 } 100 101 DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get, 102 barrier_nospec_set, "%llu\n"); 103 104 static __init int barrier_nospec_debugfs_init(void) 105 { 106 debugfs_create_file_unsafe("barrier_nospec", 0600, 107 powerpc_debugfs_root, NULL, 108 &fops_barrier_nospec); 109 return 0; 110 } 111 device_initcall(barrier_nospec_debugfs_init); 112 113 static __init int security_feature_debugfs_init(void) 114 { 115 debugfs_create_x64("security_features", 0400, powerpc_debugfs_root, 116 &powerpc_security_features); 117 return 0; 118 } 119 device_initcall(security_feature_debugfs_init); 120 #endif /* CONFIG_DEBUG_FS */ 121 122 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 123 static int __init handle_nospectre_v2(char *p) 124 { 125 no_spectrev2 = true; 126 127 return 0; 128 } 129 early_param("nospectre_v2", handle_nospectre_v2); 130 #endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ 131 132 #ifdef CONFIG_PPC_FSL_BOOK3E 133 void setup_spectre_v2(void) 134 { 135 if (no_spectrev2 || cpu_mitigations_off()) 136 do_btb_flush_fixups(); 137 else 138 btb_flush_enabled = true; 139 } 140 #endif /* CONFIG_PPC_FSL_BOOK3E */ 141 142 #ifdef CONFIG_PPC_BOOK3S_64 143 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 144 { 145 bool thread_priv; 146 147 thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); 148 149 if (rfi_flush) { 150 struct seq_buf s; 151 seq_buf_init(&s, buf, PAGE_SIZE - 1); 152 153 seq_buf_printf(&s, "Mitigation: RFI Flush"); 154 if (thread_priv) 155 seq_buf_printf(&s, ", L1D private per thread"); 156 157 seq_buf_printf(&s, "\n"); 158 159 return s.len; 160 } 161 162 if (thread_priv) 163 return sprintf(buf, "Vulnerable: L1D private per thread\n"); 164 165 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 166 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 167 return sprintf(buf, "Not affected\n"); 168 169 return sprintf(buf, "Vulnerable\n"); 170 } 171 172 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 173 { 174 return cpu_show_meltdown(dev, attr, buf); 175 } 176 #endif 177 178 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 179 { 180 struct seq_buf s; 181 182 seq_buf_init(&s, buf, PAGE_SIZE - 1); 183 184 if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { 185 if (barrier_nospec_enabled) 186 seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); 187 else 188 seq_buf_printf(&s, "Vulnerable"); 189 190 if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) 191 seq_buf_printf(&s, ", ori31 speculation barrier enabled"); 192 193 seq_buf_printf(&s, "\n"); 194 } else 195 seq_buf_printf(&s, "Not affected\n"); 196 197 return s.len; 198 } 199 200 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 201 { 202 struct seq_buf s; 203 bool bcs, ccd; 204 205 seq_buf_init(&s, buf, PAGE_SIZE - 1); 206 207 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); 208 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); 209 210 if (bcs || ccd) { 211 seq_buf_printf(&s, "Mitigation: "); 212 213 if (bcs) 214 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); 215 216 if (bcs && ccd) 217 seq_buf_printf(&s, ", "); 218 219 if (ccd) 220 seq_buf_printf(&s, "Indirect branch cache disabled"); 221 222 if (link_stack_flush_enabled) 223 seq_buf_printf(&s, ", Software link stack flush"); 224 225 } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { 226 seq_buf_printf(&s, "Mitigation: Software count cache flush"); 227 228 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) 229 seq_buf_printf(&s, " (hardware accelerated)"); 230 231 if (link_stack_flush_enabled) 232 seq_buf_printf(&s, ", Software link stack flush"); 233 234 } else if (btb_flush_enabled) { 235 seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); 236 } else { 237 seq_buf_printf(&s, "Vulnerable"); 238 } 239 240 seq_buf_printf(&s, "\n"); 241 242 return s.len; 243 } 244 245 #ifdef CONFIG_PPC_BOOK3S_64 246 /* 247 * Store-forwarding barrier support. 248 */ 249 250 static enum stf_barrier_type stf_enabled_flush_types; 251 static bool no_stf_barrier; 252 bool stf_barrier; 253 254 static int __init handle_no_stf_barrier(char *p) 255 { 256 pr_info("stf-barrier: disabled on command line."); 257 no_stf_barrier = true; 258 return 0; 259 } 260 261 early_param("no_stf_barrier", handle_no_stf_barrier); 262 263 /* This is the generic flag used by other architectures */ 264 static int __init handle_ssbd(char *p) 265 { 266 if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { 267 /* Until firmware tells us, we have the barrier with auto */ 268 return 0; 269 } else if (strncmp(p, "off", 3) == 0) { 270 handle_no_stf_barrier(NULL); 271 return 0; 272 } else 273 return 1; 274 275 return 0; 276 } 277 early_param("spec_store_bypass_disable", handle_ssbd); 278 279 /* This is the generic flag used by other architectures */ 280 static int __init handle_no_ssbd(char *p) 281 { 282 handle_no_stf_barrier(NULL); 283 return 0; 284 } 285 early_param("nospec_store_bypass_disable", handle_no_ssbd); 286 287 static void stf_barrier_enable(bool enable) 288 { 289 if (enable) 290 do_stf_barrier_fixups(stf_enabled_flush_types); 291 else 292 do_stf_barrier_fixups(STF_BARRIER_NONE); 293 294 stf_barrier = enable; 295 } 296 297 void setup_stf_barrier(void) 298 { 299 enum stf_barrier_type type; 300 bool enable, hv; 301 302 hv = cpu_has_feature(CPU_FTR_HVMODE); 303 304 /* Default to fallback in case fw-features are not available */ 305 if (cpu_has_feature(CPU_FTR_ARCH_300)) 306 type = STF_BARRIER_EIEIO; 307 else if (cpu_has_feature(CPU_FTR_ARCH_207S)) 308 type = STF_BARRIER_SYNC_ORI; 309 else if (cpu_has_feature(CPU_FTR_ARCH_206)) 310 type = STF_BARRIER_FALLBACK; 311 else 312 type = STF_BARRIER_NONE; 313 314 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 315 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || 316 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); 317 318 if (type == STF_BARRIER_FALLBACK) { 319 pr_info("stf-barrier: fallback barrier available\n"); 320 } else if (type == STF_BARRIER_SYNC_ORI) { 321 pr_info("stf-barrier: hwsync barrier available\n"); 322 } else if (type == STF_BARRIER_EIEIO) { 323 pr_info("stf-barrier: eieio barrier available\n"); 324 } 325 326 stf_enabled_flush_types = type; 327 328 if (!no_stf_barrier && !cpu_mitigations_off()) 329 stf_barrier_enable(enable); 330 } 331 332 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 333 { 334 if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { 335 const char *type; 336 switch (stf_enabled_flush_types) { 337 case STF_BARRIER_EIEIO: 338 type = "eieio"; 339 break; 340 case STF_BARRIER_SYNC_ORI: 341 type = "hwsync"; 342 break; 343 case STF_BARRIER_FALLBACK: 344 type = "fallback"; 345 break; 346 default: 347 type = "unknown"; 348 } 349 return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); 350 } 351 352 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 353 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 354 return sprintf(buf, "Not affected\n"); 355 356 return sprintf(buf, "Vulnerable\n"); 357 } 358 359 static int ssb_prctl_get(struct task_struct *task) 360 { 361 if (stf_enabled_flush_types == STF_BARRIER_NONE) 362 /* 363 * We don't have an explicit signal from firmware that we're 364 * vulnerable or not, we only have certain CPU revisions that 365 * are known to be vulnerable. 366 * 367 * We assume that if we're on another CPU, where the barrier is 368 * NONE, then we are not vulnerable. 369 */ 370 return PR_SPEC_NOT_AFFECTED; 371 else 372 /* 373 * If we do have a barrier type then we are vulnerable. The 374 * barrier is not a global or per-process mitigation, so the 375 * only value we can report here is PR_SPEC_ENABLE, which 376 * appears as "vulnerable" in /proc. 377 */ 378 return PR_SPEC_ENABLE; 379 380 return -EINVAL; 381 } 382 383 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 384 { 385 switch (which) { 386 case PR_SPEC_STORE_BYPASS: 387 return ssb_prctl_get(task); 388 default: 389 return -ENODEV; 390 } 391 } 392 393 #ifdef CONFIG_DEBUG_FS 394 static int stf_barrier_set(void *data, u64 val) 395 { 396 bool enable; 397 398 if (val == 1) 399 enable = true; 400 else if (val == 0) 401 enable = false; 402 else 403 return -EINVAL; 404 405 /* Only do anything if we're changing state */ 406 if (enable != stf_barrier) 407 stf_barrier_enable(enable); 408 409 return 0; 410 } 411 412 static int stf_barrier_get(void *data, u64 *val) 413 { 414 *val = stf_barrier ? 1 : 0; 415 return 0; 416 } 417 418 DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, 419 "%llu\n"); 420 421 static __init int stf_barrier_debugfs_init(void) 422 { 423 debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root, 424 NULL, &fops_stf_barrier); 425 return 0; 426 } 427 device_initcall(stf_barrier_debugfs_init); 428 #endif /* CONFIG_DEBUG_FS */ 429 430 static void no_count_cache_flush(void) 431 { 432 count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; 433 pr_info("count-cache-flush: software flush disabled.\n"); 434 } 435 436 static void toggle_count_cache_flush(bool enable) 437 { 438 if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && 439 !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) 440 enable = false; 441 442 if (!enable) { 443 patch_instruction_site(&patch__call_flush_count_cache, 444 ppc_inst(PPC_INST_NOP)); 445 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 446 patch_instruction_site(&patch__call_kvm_flush_link_stack, 447 ppc_inst(PPC_INST_NOP)); 448 #endif 449 pr_info("link-stack-flush: software flush disabled.\n"); 450 link_stack_flush_enabled = false; 451 no_count_cache_flush(); 452 return; 453 } 454 455 // This enables the branch from _switch to flush_count_cache 456 patch_branch_site(&patch__call_flush_count_cache, 457 (u64)&flush_count_cache, BRANCH_SET_LINK); 458 459 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 460 // This enables the branch from guest_exit_cont to kvm_flush_link_stack 461 patch_branch_site(&patch__call_kvm_flush_link_stack, 462 (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); 463 #endif 464 465 pr_info("link-stack-flush: software flush enabled.\n"); 466 link_stack_flush_enabled = true; 467 468 // If we just need to flush the link stack, patch an early return 469 if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { 470 patch_instruction_site(&patch__flush_link_stack_return, 471 ppc_inst(PPC_INST_BLR)); 472 no_count_cache_flush(); 473 return; 474 } 475 476 if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { 477 count_cache_flush_type = COUNT_CACHE_FLUSH_SW; 478 pr_info("count-cache-flush: full software flush sequence enabled.\n"); 479 return; 480 } 481 482 patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR)); 483 count_cache_flush_type = COUNT_CACHE_FLUSH_HW; 484 pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); 485 } 486 487 void setup_count_cache_flush(void) 488 { 489 bool enable = true; 490 491 if (no_spectrev2 || cpu_mitigations_off()) { 492 if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || 493 security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) 494 pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); 495 496 enable = false; 497 } 498 499 /* 500 * There's no firmware feature flag/hypervisor bit to tell us we need to 501 * flush the link stack on context switch. So we set it here if we see 502 * either of the Spectre v2 mitigations that aim to protect userspace. 503 */ 504 if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || 505 security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) 506 security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); 507 508 toggle_count_cache_flush(enable); 509 } 510 511 #ifdef CONFIG_DEBUG_FS 512 static int count_cache_flush_set(void *data, u64 val) 513 { 514 bool enable; 515 516 if (val == 1) 517 enable = true; 518 else if (val == 0) 519 enable = false; 520 else 521 return -EINVAL; 522 523 toggle_count_cache_flush(enable); 524 525 return 0; 526 } 527 528 static int count_cache_flush_get(void *data, u64 *val) 529 { 530 if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE) 531 *val = 0; 532 else 533 *val = 1; 534 535 return 0; 536 } 537 538 DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, 539 count_cache_flush_set, "%llu\n"); 540 541 static __init int count_cache_flush_debugfs_init(void) 542 { 543 debugfs_create_file_unsafe("count_cache_flush", 0600, 544 powerpc_debugfs_root, NULL, 545 &fops_count_cache_flush); 546 return 0; 547 } 548 device_initcall(count_cache_flush_debugfs_init); 549 #endif /* CONFIG_DEBUG_FS */ 550 #endif /* CONFIG_PPC_BOOK3S_64 */ 551