1 // SPDX-License-Identifier: GPL-2.0+ 2 // 3 // Security related flags and so on. 4 // 5 // Copyright 2018, Michael Ellerman, IBM Corporation. 6 7 #include <linux/cpu.h> 8 #include <linux/kernel.h> 9 #include <linux/device.h> 10 #include <linux/memblock.h> 11 #include <linux/nospec.h> 12 #include <linux/prctl.h> 13 #include <linux/seq_buf.h> 14 #include <linux/debugfs.h> 15 16 #include <asm/asm-prototypes.h> 17 #include <asm/code-patching.h> 18 #include <asm/security_features.h> 19 #include <asm/setup.h> 20 #include <asm/inst.h> 21 22 #include "setup.h" 23 24 u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; 25 26 enum branch_cache_flush_type { 27 BRANCH_CACHE_FLUSH_NONE = 0x1, 28 BRANCH_CACHE_FLUSH_SW = 0x2, 29 BRANCH_CACHE_FLUSH_HW = 0x4, 30 }; 31 static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; 32 static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; 33 34 bool barrier_nospec_enabled; 35 static bool no_nospec; 36 static bool btb_flush_enabled; 37 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 38 static bool no_spectrev2; 39 #endif 40 41 static void enable_barrier_nospec(bool enable) 42 { 43 barrier_nospec_enabled = enable; 44 do_barrier_nospec_fixups(enable); 45 } 46 47 void __init setup_barrier_nospec(void) 48 { 49 bool enable; 50 51 /* 52 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. 53 * But there's a good reason not to. The two flags we check below are 54 * both are enabled by default in the kernel, so if the hcall is not 55 * functional they will be enabled. 56 * On a system where the host firmware has been updated (so the ori 57 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has 58 * not been updated, we would like to enable the barrier. Dropping the 59 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is 60 * we potentially enable the barrier on systems where the host firmware 61 * is not updated, but that's harmless as it's a no-op. 62 */ 63 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 64 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); 65 66 if (!no_nospec && !cpu_mitigations_off()) 67 enable_barrier_nospec(enable); 68 } 69 70 static int __init handle_nospectre_v1(char *p) 71 { 72 no_nospec = true; 73 74 return 0; 75 } 76 early_param("nospectre_v1", handle_nospectre_v1); 77 78 #ifdef CONFIG_DEBUG_FS 79 static int barrier_nospec_set(void *data, u64 val) 80 { 81 switch (val) { 82 case 0: 83 case 1: 84 break; 85 default: 86 return -EINVAL; 87 } 88 89 if (!!val == !!barrier_nospec_enabled) 90 return 0; 91 92 enable_barrier_nospec(!!val); 93 94 return 0; 95 } 96 97 static int barrier_nospec_get(void *data, u64 *val) 98 { 99 *val = barrier_nospec_enabled ? 1 : 0; 100 return 0; 101 } 102 103 DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get, 104 barrier_nospec_set, "%llu\n"); 105 106 static __init int barrier_nospec_debugfs_init(void) 107 { 108 debugfs_create_file_unsafe("barrier_nospec", 0600, 109 arch_debugfs_dir, NULL, 110 &fops_barrier_nospec); 111 return 0; 112 } 113 device_initcall(barrier_nospec_debugfs_init); 114 115 static __init int security_feature_debugfs_init(void) 116 { 117 debugfs_create_x64("security_features", 0400, arch_debugfs_dir, 118 &powerpc_security_features); 119 return 0; 120 } 121 device_initcall(security_feature_debugfs_init); 122 #endif /* CONFIG_DEBUG_FS */ 123 124 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 125 static int __init handle_nospectre_v2(char *p) 126 { 127 no_spectrev2 = true; 128 129 return 0; 130 } 131 early_param("nospectre_v2", handle_nospectre_v2); 132 #endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ 133 134 #ifdef CONFIG_PPC_FSL_BOOK3E 135 void __init setup_spectre_v2(void) 136 { 137 if (no_spectrev2 || cpu_mitigations_off()) 138 do_btb_flush_fixups(); 139 else 140 btb_flush_enabled = true; 141 } 142 #endif /* CONFIG_PPC_FSL_BOOK3E */ 143 144 #ifdef CONFIG_PPC_BOOK3S_64 145 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 146 { 147 bool thread_priv; 148 149 thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); 150 151 if (rfi_flush) { 152 struct seq_buf s; 153 seq_buf_init(&s, buf, PAGE_SIZE - 1); 154 155 seq_buf_printf(&s, "Mitigation: RFI Flush"); 156 if (thread_priv) 157 seq_buf_printf(&s, ", L1D private per thread"); 158 159 seq_buf_printf(&s, "\n"); 160 161 return s.len; 162 } 163 164 if (thread_priv) 165 return sprintf(buf, "Vulnerable: L1D private per thread\n"); 166 167 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 168 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 169 return sprintf(buf, "Not affected\n"); 170 171 return sprintf(buf, "Vulnerable\n"); 172 } 173 174 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 175 { 176 return cpu_show_meltdown(dev, attr, buf); 177 } 178 #endif 179 180 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 181 { 182 struct seq_buf s; 183 184 seq_buf_init(&s, buf, PAGE_SIZE - 1); 185 186 if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { 187 if (barrier_nospec_enabled) 188 seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); 189 else 190 seq_buf_printf(&s, "Vulnerable"); 191 192 if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) 193 seq_buf_printf(&s, ", ori31 speculation barrier enabled"); 194 195 seq_buf_printf(&s, "\n"); 196 } else 197 seq_buf_printf(&s, "Not affected\n"); 198 199 return s.len; 200 } 201 202 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 203 { 204 struct seq_buf s; 205 bool bcs, ccd; 206 207 seq_buf_init(&s, buf, PAGE_SIZE - 1); 208 209 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); 210 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); 211 212 if (bcs || ccd) { 213 seq_buf_printf(&s, "Mitigation: "); 214 215 if (bcs) 216 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); 217 218 if (bcs && ccd) 219 seq_buf_printf(&s, ", "); 220 221 if (ccd) 222 seq_buf_printf(&s, "Indirect branch cache disabled"); 223 224 } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { 225 seq_buf_printf(&s, "Mitigation: Software count cache flush"); 226 227 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) 228 seq_buf_printf(&s, " (hardware accelerated)"); 229 230 } else if (btb_flush_enabled) { 231 seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); 232 } else { 233 seq_buf_printf(&s, "Vulnerable"); 234 } 235 236 if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { 237 if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE) 238 seq_buf_printf(&s, ", Software link stack flush"); 239 if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) 240 seq_buf_printf(&s, " (hardware accelerated)"); 241 } 242 243 seq_buf_printf(&s, "\n"); 244 245 return s.len; 246 } 247 248 #ifdef CONFIG_PPC_BOOK3S_64 249 /* 250 * Store-forwarding barrier support. 251 */ 252 253 static enum stf_barrier_type stf_enabled_flush_types; 254 static bool no_stf_barrier; 255 static bool stf_barrier; 256 257 static int __init handle_no_stf_barrier(char *p) 258 { 259 pr_info("stf-barrier: disabled on command line."); 260 no_stf_barrier = true; 261 return 0; 262 } 263 264 early_param("no_stf_barrier", handle_no_stf_barrier); 265 266 enum stf_barrier_type stf_barrier_type_get(void) 267 { 268 return stf_enabled_flush_types; 269 } 270 271 /* This is the generic flag used by other architectures */ 272 static int __init handle_ssbd(char *p) 273 { 274 if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { 275 /* Until firmware tells us, we have the barrier with auto */ 276 return 0; 277 } else if (strncmp(p, "off", 3) == 0) { 278 handle_no_stf_barrier(NULL); 279 return 0; 280 } else 281 return 1; 282 283 return 0; 284 } 285 early_param("spec_store_bypass_disable", handle_ssbd); 286 287 /* This is the generic flag used by other architectures */ 288 static int __init handle_no_ssbd(char *p) 289 { 290 handle_no_stf_barrier(NULL); 291 return 0; 292 } 293 early_param("nospec_store_bypass_disable", handle_no_ssbd); 294 295 static void stf_barrier_enable(bool enable) 296 { 297 if (enable) 298 do_stf_barrier_fixups(stf_enabled_flush_types); 299 else 300 do_stf_barrier_fixups(STF_BARRIER_NONE); 301 302 stf_barrier = enable; 303 } 304 305 void setup_stf_barrier(void) 306 { 307 enum stf_barrier_type type; 308 bool enable; 309 310 /* Default to fallback in case fw-features are not available */ 311 if (cpu_has_feature(CPU_FTR_ARCH_300)) 312 type = STF_BARRIER_EIEIO; 313 else if (cpu_has_feature(CPU_FTR_ARCH_207S)) 314 type = STF_BARRIER_SYNC_ORI; 315 else if (cpu_has_feature(CPU_FTR_ARCH_206)) 316 type = STF_BARRIER_FALLBACK; 317 else 318 type = STF_BARRIER_NONE; 319 320 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 321 security_ftr_enabled(SEC_FTR_STF_BARRIER); 322 323 if (type == STF_BARRIER_FALLBACK) { 324 pr_info("stf-barrier: fallback barrier available\n"); 325 } else if (type == STF_BARRIER_SYNC_ORI) { 326 pr_info("stf-barrier: hwsync barrier available\n"); 327 } else if (type == STF_BARRIER_EIEIO) { 328 pr_info("stf-barrier: eieio barrier available\n"); 329 } 330 331 stf_enabled_flush_types = type; 332 333 if (!no_stf_barrier && !cpu_mitigations_off()) 334 stf_barrier_enable(enable); 335 } 336 337 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 338 { 339 if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { 340 const char *type; 341 switch (stf_enabled_flush_types) { 342 case STF_BARRIER_EIEIO: 343 type = "eieio"; 344 break; 345 case STF_BARRIER_SYNC_ORI: 346 type = "hwsync"; 347 break; 348 case STF_BARRIER_FALLBACK: 349 type = "fallback"; 350 break; 351 default: 352 type = "unknown"; 353 } 354 return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); 355 } 356 357 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 358 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 359 return sprintf(buf, "Not affected\n"); 360 361 return sprintf(buf, "Vulnerable\n"); 362 } 363 364 static int ssb_prctl_get(struct task_struct *task) 365 { 366 if (stf_enabled_flush_types == STF_BARRIER_NONE) 367 /* 368 * We don't have an explicit signal from firmware that we're 369 * vulnerable or not, we only have certain CPU revisions that 370 * are known to be vulnerable. 371 * 372 * We assume that if we're on another CPU, where the barrier is 373 * NONE, then we are not vulnerable. 374 */ 375 return PR_SPEC_NOT_AFFECTED; 376 else 377 /* 378 * If we do have a barrier type then we are vulnerable. The 379 * barrier is not a global or per-process mitigation, so the 380 * only value we can report here is PR_SPEC_ENABLE, which 381 * appears as "vulnerable" in /proc. 382 */ 383 return PR_SPEC_ENABLE; 384 385 return -EINVAL; 386 } 387 388 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 389 { 390 switch (which) { 391 case PR_SPEC_STORE_BYPASS: 392 return ssb_prctl_get(task); 393 default: 394 return -ENODEV; 395 } 396 } 397 398 #ifdef CONFIG_DEBUG_FS 399 static int stf_barrier_set(void *data, u64 val) 400 { 401 bool enable; 402 403 if (val == 1) 404 enable = true; 405 else if (val == 0) 406 enable = false; 407 else 408 return -EINVAL; 409 410 /* Only do anything if we're changing state */ 411 if (enable != stf_barrier) 412 stf_barrier_enable(enable); 413 414 return 0; 415 } 416 417 static int stf_barrier_get(void *data, u64 *val) 418 { 419 *val = stf_barrier ? 1 : 0; 420 return 0; 421 } 422 423 DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, 424 "%llu\n"); 425 426 static __init int stf_barrier_debugfs_init(void) 427 { 428 debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir, 429 NULL, &fops_stf_barrier); 430 return 0; 431 } 432 device_initcall(stf_barrier_debugfs_init); 433 #endif /* CONFIG_DEBUG_FS */ 434 435 static void update_branch_cache_flush(void) 436 { 437 u32 *site, __maybe_unused *site2; 438 439 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 440 site = &patch__call_kvm_flush_link_stack; 441 site2 = &patch__call_kvm_flush_link_stack_p9; 442 // This controls the branch from guest_exit_cont to kvm_flush_link_stack 443 if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { 444 patch_instruction_site(site, ppc_inst(PPC_RAW_NOP())); 445 patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP())); 446 } else { 447 // Could use HW flush, but that could also flush count cache 448 patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); 449 patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); 450 } 451 #endif 452 453 // Patch out the bcctr first, then nop the rest 454 site = &patch__call_flush_branch_caches3; 455 patch_instruction_site(site, ppc_inst(PPC_RAW_NOP())); 456 site = &patch__call_flush_branch_caches2; 457 patch_instruction_site(site, ppc_inst(PPC_RAW_NOP())); 458 site = &patch__call_flush_branch_caches1; 459 patch_instruction_site(site, ppc_inst(PPC_RAW_NOP())); 460 461 // This controls the branch from _switch to flush_branch_caches 462 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE && 463 link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { 464 // Nothing to be done 465 466 } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW && 467 link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { 468 // Patch in the bcctr last 469 site = &patch__call_flush_branch_caches1; 470 patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff 471 site = &patch__call_flush_branch_caches2; 472 patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9 473 site = &patch__call_flush_branch_caches3; 474 patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH)); 475 476 } else { 477 patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK); 478 479 // If we just need to flush the link stack, early return 480 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) { 481 patch_instruction_site(&patch__flush_link_stack_return, 482 ppc_inst(PPC_RAW_BLR())); 483 484 // If we have flush instruction, early return 485 } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) { 486 patch_instruction_site(&patch__flush_count_cache_return, 487 ppc_inst(PPC_RAW_BLR())); 488 } 489 } 490 } 491 492 static void toggle_branch_cache_flush(bool enable) 493 { 494 if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { 495 if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) 496 count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; 497 498 pr_info("count-cache-flush: flush disabled.\n"); 499 } else { 500 if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { 501 count_cache_flush_type = BRANCH_CACHE_FLUSH_HW; 502 pr_info("count-cache-flush: hardware flush enabled.\n"); 503 } else { 504 count_cache_flush_type = BRANCH_CACHE_FLUSH_SW; 505 pr_info("count-cache-flush: software flush enabled.\n"); 506 } 507 } 508 509 if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) { 510 if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE) 511 link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; 512 513 pr_info("link-stack-flush: flush disabled.\n"); 514 } else { 515 if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) { 516 link_stack_flush_type = BRANCH_CACHE_FLUSH_HW; 517 pr_info("link-stack-flush: hardware flush enabled.\n"); 518 } else { 519 link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; 520 pr_info("link-stack-flush: software flush enabled.\n"); 521 } 522 } 523 524 update_branch_cache_flush(); 525 } 526 527 void setup_count_cache_flush(void) 528 { 529 bool enable = true; 530 531 if (no_spectrev2 || cpu_mitigations_off()) { 532 if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || 533 security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) 534 pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); 535 536 enable = false; 537 } 538 539 /* 540 * There's no firmware feature flag/hypervisor bit to tell us we need to 541 * flush the link stack on context switch. So we set it here if we see 542 * either of the Spectre v2 mitigations that aim to protect userspace. 543 */ 544 if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || 545 security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) 546 security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); 547 548 toggle_branch_cache_flush(enable); 549 } 550 551 static enum l1d_flush_type enabled_flush_types; 552 static void *l1d_flush_fallback_area; 553 static bool no_rfi_flush; 554 static bool no_entry_flush; 555 static bool no_uaccess_flush; 556 bool rfi_flush; 557 static bool entry_flush; 558 static bool uaccess_flush; 559 DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); 560 EXPORT_SYMBOL(uaccess_flush_key); 561 562 static int __init handle_no_rfi_flush(char *p) 563 { 564 pr_info("rfi-flush: disabled on command line."); 565 no_rfi_flush = true; 566 return 0; 567 } 568 early_param("no_rfi_flush", handle_no_rfi_flush); 569 570 static int __init handle_no_entry_flush(char *p) 571 { 572 pr_info("entry-flush: disabled on command line."); 573 no_entry_flush = true; 574 return 0; 575 } 576 early_param("no_entry_flush", handle_no_entry_flush); 577 578 static int __init handle_no_uaccess_flush(char *p) 579 { 580 pr_info("uaccess-flush: disabled on command line."); 581 no_uaccess_flush = true; 582 return 0; 583 } 584 early_param("no_uaccess_flush", handle_no_uaccess_flush); 585 586 /* 587 * The RFI flush is not KPTI, but because users will see doco that says to use 588 * nopti we hijack that option here to also disable the RFI flush. 589 */ 590 static int __init handle_no_pti(char *p) 591 { 592 pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); 593 handle_no_rfi_flush(NULL); 594 return 0; 595 } 596 early_param("nopti", handle_no_pti); 597 598 static void do_nothing(void *unused) 599 { 600 /* 601 * We don't need to do the flush explicitly, just enter+exit kernel is 602 * sufficient, the RFI exit handlers will do the right thing. 603 */ 604 } 605 606 void rfi_flush_enable(bool enable) 607 { 608 if (enable) { 609 do_rfi_flush_fixups(enabled_flush_types); 610 on_each_cpu(do_nothing, NULL, 1); 611 } else 612 do_rfi_flush_fixups(L1D_FLUSH_NONE); 613 614 rfi_flush = enable; 615 } 616 617 static void entry_flush_enable(bool enable) 618 { 619 if (enable) { 620 do_entry_flush_fixups(enabled_flush_types); 621 on_each_cpu(do_nothing, NULL, 1); 622 } else { 623 do_entry_flush_fixups(L1D_FLUSH_NONE); 624 } 625 626 entry_flush = enable; 627 } 628 629 static void uaccess_flush_enable(bool enable) 630 { 631 if (enable) { 632 do_uaccess_flush_fixups(enabled_flush_types); 633 static_branch_enable(&uaccess_flush_key); 634 on_each_cpu(do_nothing, NULL, 1); 635 } else { 636 static_branch_disable(&uaccess_flush_key); 637 do_uaccess_flush_fixups(L1D_FLUSH_NONE); 638 } 639 640 uaccess_flush = enable; 641 } 642 643 static void __ref init_fallback_flush(void) 644 { 645 u64 l1d_size, limit; 646 int cpu; 647 648 /* Only allocate the fallback flush area once (at boot time). */ 649 if (l1d_flush_fallback_area) 650 return; 651 652 l1d_size = ppc64_caches.l1d.size; 653 654 /* 655 * If there is no d-cache-size property in the device tree, l1d_size 656 * could be zero. That leads to the loop in the asm wrapping around to 657 * 2^64-1, and then walking off the end of the fallback area and 658 * eventually causing a page fault which is fatal. Just default to 659 * something vaguely sane. 660 */ 661 if (!l1d_size) 662 l1d_size = (64 * 1024); 663 664 limit = min(ppc64_bolted_size(), ppc64_rma_size); 665 666 /* 667 * Align to L1d size, and size it at 2x L1d size, to catch possible 668 * hardware prefetch runoff. We don't have a recipe for load patterns to 669 * reliably avoid the prefetcher. 670 */ 671 l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2, 672 l1d_size, MEMBLOCK_LOW_LIMIT, 673 limit, NUMA_NO_NODE); 674 if (!l1d_flush_fallback_area) 675 panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n", 676 __func__, l1d_size * 2, l1d_size, &limit); 677 678 679 for_each_possible_cpu(cpu) { 680 struct paca_struct *paca = paca_ptrs[cpu]; 681 paca->rfi_flush_fallback_area = l1d_flush_fallback_area; 682 paca->l1d_flush_size = l1d_size; 683 } 684 } 685 686 void setup_rfi_flush(enum l1d_flush_type types, bool enable) 687 { 688 if (types & L1D_FLUSH_FALLBACK) { 689 pr_info("rfi-flush: fallback displacement flush available\n"); 690 init_fallback_flush(); 691 } 692 693 if (types & L1D_FLUSH_ORI) 694 pr_info("rfi-flush: ori type flush available\n"); 695 696 if (types & L1D_FLUSH_MTTRIG) 697 pr_info("rfi-flush: mttrig type flush available\n"); 698 699 enabled_flush_types = types; 700 701 if (!cpu_mitigations_off() && !no_rfi_flush) 702 rfi_flush_enable(enable); 703 } 704 705 void setup_entry_flush(bool enable) 706 { 707 if (cpu_mitigations_off()) 708 return; 709 710 if (!no_entry_flush) 711 entry_flush_enable(enable); 712 } 713 714 void setup_uaccess_flush(bool enable) 715 { 716 if (cpu_mitigations_off()) 717 return; 718 719 if (!no_uaccess_flush) 720 uaccess_flush_enable(enable); 721 } 722 723 #ifdef CONFIG_DEBUG_FS 724 static int count_cache_flush_set(void *data, u64 val) 725 { 726 bool enable; 727 728 if (val == 1) 729 enable = true; 730 else if (val == 0) 731 enable = false; 732 else 733 return -EINVAL; 734 735 toggle_branch_cache_flush(enable); 736 737 return 0; 738 } 739 740 static int count_cache_flush_get(void *data, u64 *val) 741 { 742 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) 743 *val = 0; 744 else 745 *val = 1; 746 747 return 0; 748 } 749 750 static int link_stack_flush_get(void *data, u64 *val) 751 { 752 if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) 753 *val = 0; 754 else 755 *val = 1; 756 757 return 0; 758 } 759 760 DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, 761 count_cache_flush_set, "%llu\n"); 762 DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get, 763 count_cache_flush_set, "%llu\n"); 764 765 static __init int count_cache_flush_debugfs_init(void) 766 { 767 debugfs_create_file_unsafe("count_cache_flush", 0600, 768 arch_debugfs_dir, NULL, 769 &fops_count_cache_flush); 770 debugfs_create_file_unsafe("link_stack_flush", 0600, 771 arch_debugfs_dir, NULL, 772 &fops_link_stack_flush); 773 return 0; 774 } 775 device_initcall(count_cache_flush_debugfs_init); 776 777 static int rfi_flush_set(void *data, u64 val) 778 { 779 bool enable; 780 781 if (val == 1) 782 enable = true; 783 else if (val == 0) 784 enable = false; 785 else 786 return -EINVAL; 787 788 /* Only do anything if we're changing state */ 789 if (enable != rfi_flush) 790 rfi_flush_enable(enable); 791 792 return 0; 793 } 794 795 static int rfi_flush_get(void *data, u64 *val) 796 { 797 *val = rfi_flush ? 1 : 0; 798 return 0; 799 } 800 801 DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); 802 803 static int entry_flush_set(void *data, u64 val) 804 { 805 bool enable; 806 807 if (val == 1) 808 enable = true; 809 else if (val == 0) 810 enable = false; 811 else 812 return -EINVAL; 813 814 /* Only do anything if we're changing state */ 815 if (enable != entry_flush) 816 entry_flush_enable(enable); 817 818 return 0; 819 } 820 821 static int entry_flush_get(void *data, u64 *val) 822 { 823 *val = entry_flush ? 1 : 0; 824 return 0; 825 } 826 827 DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); 828 829 static int uaccess_flush_set(void *data, u64 val) 830 { 831 bool enable; 832 833 if (val == 1) 834 enable = true; 835 else if (val == 0) 836 enable = false; 837 else 838 return -EINVAL; 839 840 /* Only do anything if we're changing state */ 841 if (enable != uaccess_flush) 842 uaccess_flush_enable(enable); 843 844 return 0; 845 } 846 847 static int uaccess_flush_get(void *data, u64 *val) 848 { 849 *val = uaccess_flush ? 1 : 0; 850 return 0; 851 } 852 853 DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); 854 855 static __init int rfi_flush_debugfs_init(void) 856 { 857 debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush); 858 debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush); 859 debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush); 860 return 0; 861 } 862 device_initcall(rfi_flush_debugfs_init); 863 #endif /* CONFIG_DEBUG_FS */ 864 #endif /* CONFIG_PPC_BOOK3S_64 */ 865