/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
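
		/*
		 * Net effect: every thread of a core shares one
		 * tlb_core_data, e.g. with four threads per core, cpus
		 * 4-7 all use paca_ptrs[4]->tcd (or the boot thread's,
		 * per the kdump case above). The BUILD_BUG_ON earlier
		 * pins the lock at offset 0, presumably so low-level
		 * handlers can use tcd_ptr directly as the lock address.
		 */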

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option, e.g. "smt-enabled=2" */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
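
	/*
	 * Background: LPCR_AIL_3 sets LPCR[AIL] = 0b11, so interrupts
	 * are taken with relocation on at the 0xc000000000004000
	 * alias of the vectors, skipping the real-mode entry/exit
	 * dance. This requires ISA v2.07 (POWER8) or later, hence the
	 * CPU_FTR_ARCH_207S check below.
	 */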
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, set up the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();
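
	/*
	 * Note the paca has now been set up twice: first on the static
	 * boot_paca above, blindly assuming cpu 0, so that the very
	 * early code has a paca at all, and now on the real
	 * paca_ptrs[boot_cpuid] entry once the device tree has told us
	 * who we actually are.
	 */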

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set, and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (even after the above DBG()).
	 *
	 * Right after we return from this function, we turn on the MMU,
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/*
	 * All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
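
	/*
	 * Writing a non-zero entry address into
	 * __secondary_hold_spinloop is what releases the secondaries;
	 * each one branches to generic_secondary_smp_init (see
	 * head_64.S). ppc_function_entry() is used so this also works
	 * on ABIs where a function symbol names a descriptor rather
	 * than the entry point itself.
	 */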
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
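
/*
 * For reference, a hypothetical cpu node carrying the properties
 * parsed above could look like:
 *
 *	cpu@0 {
 *		d-cache-size       = <0x8000>;
 *		d-cache-sets       = <64>;
 *		d-cache-block-size = <128>;
 *		d-cache-line-size  = <128>;
 *	};
 *
 * from which init_cache_info() would derive assoc = size / (sets *
 * line_size) = 0x8000 / (64 * 128) = 4-way, with 128-byte blocks.
 */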

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips, but for now hard
	 * code the values if we are running on one of these.
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/*    size  lsize blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128, 0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

static void *__init alloc_stack(unsigned long limit, int cpu)
{
	unsigned long pa;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
				     early_cpu_to_node(cpu), MEMBLOCK_NONE);
	if (!pa) {
		/* Fall back to any node if the cpu's node has no memory */
		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		if (!pa)
			panic("cannot allocate stacks");
	}

	return __va(pa);
}
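
/*
 * Note the THREAD_SIZE alignment requested above: this kernel keeps
 * thread_info at the base of each stack, so the current stack's
 * thread_info can be found by masking the stack pointer with
 * ~(THREAD_SIZE - 1).
 */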

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous, process context.
 * We set preempt_count to zero, even though that isn't necessarily correct. To
 * get the right value we'd need to copy it from the previous thread_info, but
 * doing that might fault, causing more problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;

		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
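
		/*
		 * Machine checks get their own stack, separate from
		 * the NMI one above, since a machine check can be
		 * taken while a system reset is already being handled
		 * on that stack.
		 */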
		/* emergency stack for machine check exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
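
/*
 * Worked example for the conversion above: ppc_proc_freq is in Hz, so
 * on a hypothetical 3.5 GHz processor with the default watchdog_thresh
 * of 10 seconds, the perf watchdog samples every 35,000,000,000
 * cycles.
 */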

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);
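
	/*
	 * The fallback is a "displacement" flush: the RFI exit code
	 * reads sequentially through this dedicated area so that every
	 * L1D line is pushed out by replacement. The 2x sizing and L1d
	 * alignment below are meant to also displace lines the
	 * hardware prefetcher may have pulled in past the end.
	 */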

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2,
						l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush)
		rfi_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL,
			    &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */