// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
#include <asm/inst.h>

#include "setup.h"

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum branch_cache_flush_type {
	BRANCH_CACHE_FLUSH_NONE = 0x1,
	BRANCH_CACHE_FLUSH_SW = 0x2,
	BRANCH_CACHE_FLUSH_HW = 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}

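/*
 * Expose a 0/1 knob for the speculation barrier under the powerpc debugfs
 * directory (typically /sys/kernel/debug/powerpc/barrier_nospec) so the
 * mitigation can be toggled at runtime for testing.
 */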
DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
			 barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file_unsafe("barrier_nospec", 0600,
				   powerpc_debugfs_root, NULL,
				   &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
	debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
			   &powerpc_security_features);
	return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void)
{
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush) {
		struct seq_buf s;
		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: RFI Flush");
		if (thread_priv)
			seq_buf_printf(&s, ", L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (thread_priv)
		return sprintf(buf, "Vulnerable: L1D private per thread\n");

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}

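/*
 * Report the Spectre v2 mitigation state via
 * /sys/devices/system/cpu/vulnerabilities/spectre_v2, combining the hardware
 * serialisation/disable flags with any software count cache, link stack or
 * BTB flush that is in use.
 */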
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");

	} else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");

	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			seq_buf_printf(&s, ", Software link stack flush");
		if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;

	return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}

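/*
 * Report the Speculative Store Bypass state via
 * /sys/devices/system/cpu/vulnerabilities/spec_store_bypass, naming the
 * barrier type in use when the STF barrier is enabled.
 */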
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;
		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

static int ssb_prctl_get(struct task_struct *task)
{
	if (stf_enabled_flush_types == STF_BARRIER_NONE)
		/*
		 * We don't have an explicit signal from firmware that we're
		 * vulnerable or not, we only have certain CPU revisions that
		 * are known to be vulnerable.
		 *
		 * We assume that if we're on another CPU, where the barrier is
		 * NONE, then we are not vulnerable.
		 */
		return PR_SPEC_NOT_AFFECTED;
	else
		/*
		 * If we do have a barrier type then we are vulnerable. The
		 * barrier is not a global or per-process mitigation, so the
		 * only value we can report here is PR_SPEC_ENABLE, which
		 * appears as "vulnerable" in /proc.
		 */
		return PR_SPEC_ENABLE;

	return -EINVAL;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
			 "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root,
				   NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

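/*
 * Patch the branch cache flush call sites so they match the count cache and
 * link stack flush types selected above: nop the calls out entirely, branch
 * to the software flush routine, or patch in the hardware-assisted flush
 * sequence.
 */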
static void update_branch_cache_flush(void)
{
	u32 *site, __maybe_unused *site2;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	site = &patch__call_kvm_flush_link_stack;
	site2 = &patch__call_kvm_flush_link_stack_p9;
	// This controls the branch from guest_exit_cont to kvm_flush_link_stack
	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
		patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
	} else {
		// Could use HW flush, but that could also flush count cache
		patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
		patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
	}
#endif

	// Patch out the bcctr first, then nop the rest
	site = &patch__call_flush_branch_caches3;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
	site = &patch__call_flush_branch_caches2;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
	site = &patch__call_flush_branch_caches1;
	patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));

	// This controls the branch from _switch to flush_branch_caches
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
	    link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
		// Nothing to be done

	} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
		   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
		// Patch in the bcctr last
		site = &patch__call_flush_branch_caches1;
		patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
		site = &patch__call_flush_branch_caches2;
		patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
		site = &patch__call_flush_branch_caches3;
		patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

	} else {
		patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

		// If we just need to flush the link stack, early return
		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
			patch_instruction_site(&patch__flush_link_stack_return,
					       ppc_inst(PPC_RAW_BLR()));

		// If we have flush instruction, early return
		} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
			patch_instruction_site(&patch__flush_count_cache_return,
					       ppc_inst(PPC_RAW_BLR()));
		}
	}
}

static void toggle_branch_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
			count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("count-cache-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("count-cache-flush: hardware flush enabled.\n");
		} else {
			count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("count-cache-flush: software flush enabled.\n");
		}
	}

	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
		if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
			link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

		pr_info("link-stack-flush: flush disabled.\n");
	} else {
		if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
			pr_info("link-stack-flush: hardware flush enabled.\n");
		} else {
			link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
			pr_info("link-stack-flush: software flush enabled.\n");
		}
	}

	update_branch_cache_flush();
}

void setup_count_cache_flush(void)
{
	bool enable = true;

	if (no_spectrev2 || cpu_mitigations_off()) {
		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

		enable = false;
	}

	/*
	 * There's no firmware feature flag/hypervisor bit to tell us we need to
	 * flush the link stack on context switch. So we set it here if we see
	 * either of the Spectre v2 mitigations that aim to protect userspace.
	 */
	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

	toggle_branch_cache_flush(enable);
}

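/*
 * State for the kernel's L1D flush mitigations: the RFI exit flush, the
 * kernel entry flush and the uaccess flush, each of which can be disabled
 * individually on the command line.
 */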
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
	pr_info("entry-flush: disabled on command line.");
	no_entry_flush = true;
	return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
	pr_info("uaccess-flush: disabled on command line.");
	no_uaccess_flush = true;
	return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void entry_flush_enable(bool enable)
{
	if (enable) {
		do_entry_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		do_entry_flush_fixups(L1D_FLUSH_NONE);
	}

	entry_flush = enable;
}

static void uaccess_flush_enable(bool enable)
{
	if (enable) {
		do_uaccess_flush_fixups(enabled_flush_types);
		static_branch_enable(&uaccess_flush_key);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		static_branch_disable(&uaccess_flush_key);
		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
	}

	uaccess_flush = enable;
}

static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!cpu_mitigations_off() && !no_rfi_flush)
		rfi_flush_enable(enable);
}

void setup_entry_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_entry_flush)
		entry_flush_enable(enable);
}

void setup_uaccess_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_uaccess_flush)
		uaccess_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_branch_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			 count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file_unsafe("count_cache_flush", 0600,
				   powerpc_debugfs_root, NULL,
				   &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);

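/*
 * rfi_flush, entry_flush and uaccess_flush debugfs files: each L1D flush site
 * can be toggled independently at runtime under the powerpc debugfs directory
 * for testing.
 */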
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static int entry_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != entry_flush)
		entry_flush_enable(enable);

	return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
	*val = entry_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");

static int uaccess_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != uaccess_flush)
		uaccess_flush_enable(enable);

	return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
	*val = uaccess_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */