// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
#include <asm/inst.h>

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum branch_cache_flush_type {
	BRANCH_CACHE_FLUSH_NONE	= 0x1,
	BRANCH_CACHE_FLUSH_SW	= 0x2,
	BRANCH_CACHE_FLUSH_HW	= 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/QEMU) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}
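/*
 * Expose the barrier as a simple 0/1 debugfs knob. A usage sketch,
 * assuming debugfs is mounted at its usual location, /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/powerpc/barrier_nospec
 *   echo 0 > /sys/kernel/debug/powerpc/barrier_nospec
 */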
DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
			 barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file_unsafe("barrier_nospec", 0600,
				   powerpc_debugfs_root, NULL,
				   &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
	debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
			   &powerpc_security_features);
	return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void)
{
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush) {
		struct seq_buf s;

		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: RFI Flush");
		if (thread_priv)
			seq_buf_printf(&s, ", L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (thread_priv)
		return sprintf(buf, "Vulnerable: L1D private per thread\n");

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}
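/*
 * Report the strongest Spectre v2 mitigation in effect: a hardware/firmware
 * mitigation (indirect branch serialisation or a disabled count cache),
 * failing that the software count cache flush, failing that the branch
 * predictor state flush used on FSL Book3E. Link stack flushing is reported
 * separately, since it can accompany any of the count cache mitigations.
 */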
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");

	} else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");

	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			seq_buf_printf(&s, ", Software link stack flush");
		if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.\n");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}
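/*
 * The barrier type reported here mirrors the selection in
 * setup_stf_barrier() above: eieio on ISA v3.0, hwsync (sync/ori) on
 * ISA v2.07S, and the fallback sequence on ISA v2.06. Mapping those
 * feature bits to POWER9/POWER8/POWER7 respectively is an editorial
 * gloss, not something this file states.
 */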
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;

		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

static int ssb_prctl_get(struct task_struct *task)
{
	if (stf_enabled_flush_types == STF_BARRIER_NONE)
		/*
		 * We don't have an explicit signal from firmware that we're
		 * vulnerable or not, we only have certain CPU revisions that
		 * are known to be vulnerable.
		 *
		 * We assume that if we're on another CPU, where the barrier is
		 * NONE, then we are not vulnerable.
		 */
		return PR_SPEC_NOT_AFFECTED;
	else
		/*
		 * If we do have a barrier type then we are vulnerable. The
		 * barrier is not a global or per-process mitigation, so the
		 * only value we can report here is PR_SPEC_ENABLE, which
		 * appears as "vulnerable" in /proc.
		 */
		return PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
			 "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root,
				   NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
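/*
 * Runtime patching of the branch cache flush call sites. The flush code in
 * the context switch path is reached via three consecutive patchable
 * instructions: depending on the selected flush types they become nops, a
 * branch-and-link to flush_branch_caches, or (when both the count cache and
 * link stack have the hardware flush assist) an inlined li/mtctr/bcctr
 * sequence that avoids the call entirely.
 */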
static void update_branch_cache_flush(void)
{
	u32 *site;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	site = &patch__call_kvm_flush_link_stack;
	// This controls the branch from guest_exit_cont to kvm_flush_link_stack
	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
	} else {
		// Could use HW flush, but that could also flush count cache
		patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
	}
#endif

	// Patch out the bcctr first, then nop the rest
	site = &patch__call_flush_branch_caches3;
	patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
	site = &patch__call_flush_branch_caches2;
	patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
	site = &patch__call_flush_branch_caches1;
	patch_instruction_site(site, ppc_inst(PPC_INST_NOP));

	// This controls the branch from _switch to flush_branch_caches
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
	    link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		// Nothing to be done

	} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
		   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
		// Patch in the bcctr last
		site = &patch__call_flush_branch_caches1;
		patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
		site = &patch__call_flush_branch_caches2;
		patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
		site = &patch__call_flush_branch_caches3;
		patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

	} else {
		patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

		// If we just need to flush the link stack, early return
		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
			patch_instruction_site(&patch__flush_link_stack_return,
					       ppc_inst(PPC_INST_BLR));

		// If we have flush instruction, early return
		} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
			patch_instruction_site(&patch__flush_count_cache_return,
					       ppc_inst(PPC_INST_BLR));
		}
	}
}

static void toggle_branch_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
			count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("count-cache-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("count-cache-flush: hardware flush enabled.\n");
		} else {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("count-cache-flush: software flush enabled.\n");
		}
	}

	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("link-stack-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("link-stack-flush: hardware flush enabled.\n");
		} else {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("link-stack-flush: software flush enabled.\n");
		}
	}

	update_branch_cache_flush();
}
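/*
 * Boot-time policy selection. This is presumably called from platform setup
 * once the firmware security features have been populated (the call site is
 * platform code, not shown here). The debugfs knob below reuses
 * toggle_branch_cache_flush(), so the state can also be changed at runtime.
 */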
void setup_count_cache_flush(void)
{
	bool enable = true;

	if (no_spectrev2 || cpu_mitigations_off()) {
		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

		enable = false;
	}

	/*
	 * There's no firmware feature flag/hypervisor bit to tell us we need to
	 * flush the link stack on context switch. So we set it here if we see
	 * either of the Spectre v2 mitigations that aim to protect userspace.
	 */
	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

	toggle_branch_cache_flush(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_branch_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			 count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file_unsafe("count_cache_flush", 0600,
				   powerpc_debugfs_root, NULL,
				   &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */