// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <linux/debugfs.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/security_features.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/inst.h>

#include "setup.h"

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum branch_cache_flush_type {
	BRANCH_CACHE_FLUSH_NONE	= 0x1,
	BRANCH_CACHE_FLUSH_SW	= 0x2,
	BRANCH_CACHE_FLUSH_HW	= 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void __init setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
			 barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file_unsafe("barrier_nospec", 0600,
				   arch_debugfs_dir, NULL,
				   &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
	debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
			   &powerpc_security_features);
	return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
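
/*
 * Example usage (assuming debugfs is mounted at the usual /sys/kernel/debug,
 * and that arch_debugfs_dir is the powerpc directory there):
 *
 *   cat /sys/kernel/debug/powerpc/security_features
 *   echo 0 > /sys/kernel/debug/powerpc/barrier_nospec
 *
 * security_features is read-only (0400); barrier_nospec accepts 0 or 1 and
 * feeds barrier_nospec_set() above.
 */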

#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_E500
void __init setup_spectre_v2(void)
{
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_E500 */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush) {
		struct seq_buf s;
		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: RFI Flush");
		if (thread_priv)
			seq_buf_printf(&s, ", L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (thread_priv)
		return sprintf(buf, "Vulnerable: L1D private per thread\n");

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");

	} else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");

	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			seq_buf_printf(&s, ", Software link stack flush");
		if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}
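
/*
 * The cpu_show_*() hooks above (and cpu_show_spec_store_bypass() further
 * down) back the generic vulnerability reporting under
 * /sys/devices/system/cpu/vulnerabilities/. Illustrative read only; the
 * output varies with firmware features and the command line:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *   Mitigation: Software count cache flush, Software link stack flush
 */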

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

enum stf_barrier_type stf_barrier_type_get(void)
{
	return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;

	return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable;

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_STF_BARRIER);

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;
		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

static int ssb_prctl_get(struct task_struct *task)
{
	if (stf_enabled_flush_types == STF_BARRIER_NONE)
		/*
		 * We don't have an explicit signal from firmware that we're
		 * vulnerable or not, we only have certain CPU revisions that
		 * are known to be vulnerable.
		 *
		 * We assume that if we're on another CPU, where the barrier is
		 * NONE, then we are not vulnerable.
		 */
		return PR_SPEC_NOT_AFFECTED;
	else
		/*
		 * If we do have a barrier type then we are vulnerable. The
		 * barrier is not a global or per-process mitigation, so the
		 * only value we can report here is PR_SPEC_ENABLE, which
		 * appears as "vulnerable" in /proc.
		 */
		return PR_SPEC_ENABLE;

	return -EINVAL;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}
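
/*
 * Userspace can query this through the generic speculation prctl. A minimal
 * sketch of such a query (ordinary userspace C, shown for illustration only):
 *
 *   #include <sys/prctl.h>
 *   #include <linux/prctl.h>
 *
 *   int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *   // state is PR_SPEC_NOT_AFFECTED or PR_SPEC_ENABLE, matching what
 *   // ssb_prctl_get() returns above.
 */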

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
			 "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
				   NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

static void update_branch_cache_flush(void)
{
	u32 *site, __maybe_unused *site2;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	site = &patch__call_kvm_flush_link_stack;
	site2 = &patch__call_kvm_flush_link_stack_p9;
	// This controls the branch from guest_exit_cont to kvm_flush_link_stack
	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
		patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
	} else {
		// Could use HW flush, but that could also flush count cache
		patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
		patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
	}
#endif

	// Patch out the bcctr first, then nop the rest
	site = &patch__call_flush_branch_caches3;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
	site = &patch__call_flush_branch_caches2;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
	site = &patch__call_flush_branch_caches1;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));

	// This controls the branch from _switch to flush_branch_caches
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
	    link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		// Nothing to be done

	} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
		   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
		// Patch in the bcctr last
		site = &patch__call_flush_branch_caches1;
		patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
		site = &patch__call_flush_branch_caches2;
		patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
		site = &patch__call_flush_branch_caches3;
		patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

	} else {
		patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

		// If we just need to flush the link stack, early return
		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
			patch_instruction_site(&patch__flush_link_stack_return,
					       ppc_inst(PPC_RAW_BLR()));

		// If we have flush instruction, early return
		} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
			patch_instruction_site(&patch__flush_count_cache_return,
					       ppc_inst(PPC_RAW_BLR()));
		}
	}
}
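
/*
 * The ordering above appears deliberate: the bcctr (site 3) is nopped first
 * and re-installed last, so a CPU racing through _switch while patching is in
 * flight should only ever see all nops or the complete li/mtctr/bcctr
 * sequence, never a flush-assist bcctr with an unset CTR value.
 */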

static void toggle_branch_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
			count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("count-cache-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("count-cache-flush: hardware flush enabled.\n");
		} else {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("count-cache-flush: software flush enabled.\n");
		}
	}

	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("link-stack-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("link-stack-flush: hardware flush enabled.\n");
		} else {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("link-stack-flush: software flush enabled.\n");
		}
	}

	update_branch_cache_flush();
}

void setup_count_cache_flush(void)
{
	bool enable = true;

	if (no_spectrev2 || cpu_mitigations_off()) {
		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

		enable = false;
	}

	/*
	 * There's no firmware feature flag/hypervisor bit to tell us we need to
	 * flush the link stack on context switch. So we set it here if we see
	 * either of the Spectre v2 mitigations that aim to protect userspace.
	 */
	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

	toggle_branch_cache_flush(enable);
}
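
/*
 * L1D flush mitigations follow. Roughly, going by the fixup names and the
 * entry/exit code they patch: rfi_flush flushes the L1D when returning from
 * the kernel to userspace, entry_flush does so on kernel entry, and
 * uaccess_flush does so around kernel accesses to user memory (gated by
 * uaccess_flush_key). The available flush mechanisms (ori, mttrig, or the
 * fallback displacement flush) are reported by firmware and passed in via
 * the setup_*() functions below.
 */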

static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
	pr_info("entry-flush: disabled on command line.");
	no_entry_flush = true;
	return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
	pr_info("uaccess-flush: disabled on command line.");
	no_uaccess_flush = true;
	return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void entry_flush_enable(bool enable)
{
	if (enable) {
		do_entry_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		do_entry_flush_fixups(L1D_FLUSH_NONE);
	}

	entry_flush = enable;
}

static void uaccess_flush_enable(bool enable)
{
	if (enable) {
		do_uaccess_flush_fixups(enabled_flush_types);
		static_branch_enable(&uaccess_flush_key);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		static_branch_disable(&uaccess_flush_key);
		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
	}

	uaccess_flush = enable;
}

static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
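
/*
 * Worked example of the sizing above, using the default 64kB l1d_size: the
 * fallback area is 128kB (2 x l1d_size) aligned to 64kB, so a displacement
 * flush that walks one l1d_flush_size worth of lines still has a further L1d
 * worth of valid, mapped memory behind it to absorb prefetch overrun.
 */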

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!cpu_mitigations_off() && !no_rfi_flush)
		rfi_flush_enable(enable);
}

void setup_entry_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_entry_flush)
		entry_flush_enable(enable);
}

void setup_uaccess_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_uaccess_flush)
		uaccess_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_branch_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

static int link_stack_flush_get(void *data, u64 *val)
{
	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			 count_cache_flush_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
			 count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file_unsafe("count_cache_flush", 0600,
				   arch_debugfs_dir, NULL,
				   &fops_count_cache_flush);
	debugfs_create_file_unsafe("link_stack_flush", 0600,
				   arch_debugfs_dir, NULL,
				   &fops_link_stack_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
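
/*
 * Note that both files share count_cache_flush_set() as their write handler,
 * so writing either count_cache_flush or link_stack_flush ends up in
 * toggle_branch_cache_flush() and updates both flush types together; only
 * the read side distinguishes them.
 */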

static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static int entry_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != entry_flush)
		entry_flush_enable(enable);

	return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
	*val = entry_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");

static int uaccess_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != uaccess_flush)
		uaccess_flush_enable(enable);

	return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
	*val = uaccess_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
	debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
	debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */