// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
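/*
 * Console selection state, consumed by the s390 console drivers:
 * console_mode corresponds to the SET_CONSOLE_* choices made by
 * conmode_setup()/conmode_default() below, while console_devno and
 * console_irq optionally pin the console to a device number or
 * subchannel. The concrete mode values are owned by the driver side.
 */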
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

unsigned long int_hwcap = 0;

int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(vmalloc_size);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * The Write Back bit position in the physaddr is given by the SCLP PCI
 * interface. Leaving the mask zero always uses write through, which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup routine at boot time. For S390 we need to
 * find out what we have to set up, using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */
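/*
 * Illustrative example (device number made up): booting with
 * "condev=0x0009" makes condev_setup() record device number 0x0009
 * and reset console_irq; values outside 0..0xffff are silently
 * ignored.
 */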
static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttyS", 1, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (OLDMEM_BASE)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */
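/*
 * The underlying _machine_* handlers are provided elsewhere in the
 * architecture code (the IPL/re-IPL handling); the wrappers below
 * only add console unblanking where that is safe.
 */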
void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
			THREADINFO_GFP, NUMA_NO_NODE,
			__builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

static int __init async_stack_realloc(void)
{
	unsigned long old, new;

	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
	new = stack_alloc();
	if (!new)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
	free_pages(old, THREAD_SIZE_ORDER);
	return 0;
}
early_initcall(async_stack_realloc);

void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	CALL_ON_STACK_NORETURN(rest_init, stack);
}

static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	struct lowcore *lc;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
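	/*
	 * Note that the machine check new PSW below deliberately stays
	 * at PSW_KERNEL_BITS: unlike the other interruption PSWs it is
	 * not switched to DAT mode by setup_lowcore_dat_on() later
	 * either, so machine check entry always runs with DAT off.
	 */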
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	nmi_alloc_boot_cpu(lc);
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
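	/*
	 * The mem_assign_absolute() calls below store into the lowcore
	 * at absolute address zero rather than into the prefixed copy
	 * of the boot CPU, so the restart data is found even by a CPU
	 * whose prefix register still points at zero.
	 */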
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

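/*
 * The interruption PSWs live in the protected low-address range, so
 * low-address protection (control register 0 bit 28) is briefly
 * switched off while they are patched.
 */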
static void __init setup_lowcore_dat_on(void)
{
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_set_bit(0, 28);
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

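/*
 * For orientation (assuming the usual 4 KB pages): a 3-level kernel
 * page table covers an address space of _REGION2_SIZE = 4 TB, a
 * 4-level one covers _REGION1_SIZE = 8 PB. setup_memory_end() picks
 * the smaller layout whenever identity mapping, vmemmap, vmalloc and
 * module areas all fit below 4 TB.
 */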
static void __init setup_memory_end(void)
{
	unsigned long vmax, tmp;

	/* Choose kernel address space layout: 3 or 4 levels. */
	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
	if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
		vmax = _REGION2_SIZE; /* 3-level kernel page table */
	else
		vmax = _REGION1_SIZE; /* 4-level kernel page table */
	if (is_prot_virt_host())
		adjust_to_uv_max(&vmax);
#ifdef CONFIG_KASAN
	vmax = kasan_vmax;
#endif
	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
	memory_end = min(memory_end, KASAN_SHADOW_START);
#endif
	vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page);
#ifdef CONFIG_KASAN
	/* move vmemmap above kasan shadow only if it stands in the way */
	if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
	    (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
		vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void __init reserve_memory_end(void)
{
	if (memory_end_set)
		memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Remove oldmem, where the dump is stored, from the available memory
 */
static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;
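		/*
		 * The lower bound includes sclp.hsa_size, presumably so
		 * that the reservation stays above the memory range the
		 * stand-alone dump tooling serves from the HSA.
		 */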
		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!INITRD_START || !INITRD_SIZE)
		return;
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE &&
	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(INITRD_START, INITRD_SIZE);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
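/*
 * A sketch of what the three reservations below cover: HEAD_END keeps
 * the lowcore and early boot data at address zero out of the
 * allocator, the _stext..PFN_PHYS(start_pfn) range covers the kernel
 * image itself, and __sdma..__edma covers the kernel's .dma sections
 * (code and data that have to stay below 2 GB).
 */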
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end));

	memblock_reserve(0, HEAD_END);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
	memblock_reserve(__sdma, __edma - __sdma);
}

static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

/*
 * Setup hardware capabilities.
 */
static int __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store-facility-list bits, as numbered in the Principles of
	 * Operation, count bit 1UL<<31 as number 0 down to bit 1UL<<0 as
	 * number 31.
	 * Bit 0: instructions named N3, "backported" to esa-mode
	 * Bit 2: z/Architecture mode is active
	 * Bit 7: the store-facility-list-extended facility is installed
	 * Bit 17: the message-security assist is installed
	 * Bit 19: the long-displacement facility is installed
	 * Bit 21: the extended-immediate facility is installed
	 * Bit 22: extended-translation facility 3 is installed
	 * Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 * Bit 42: decimal floating point facility is installed
	 * Bit 44: perform floating point operation facility is installed
	 * translated to:
	 * HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_S390_TE;

	/*
	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
	 * instead of facility bit 129.
	 */
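	/*
	 * Userspace typically picks these bits up via getauxval(AT_HWCAP).
	 * The facilities tested below are the vector enhancement
	 * facilities (134, 135, 148, 152) plus enhanced sort (150) and
	 * the DEFLATE-conversion facility (151).
	 */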
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_S390_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_S390_VXRS_EXT;
		if (test_facility(135))
			elf_hwcap |= HWCAP_S390_VXRS_BCD;
		if (test_facility(148))
			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_S390_VXRS_PDE;
	}
	if (test_facility(150))
		elf_hwcap |= HWCAP_S390_SORT;
	if (test_facility(151))
		elf_hwcap |= HWCAP_S390_DFLT;

	/*
	 * Guarded storage support HWCAP_S390_GS is bit 12.
	 */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_S390_GS;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	}

	/*
	 * Virtualization support HWCAP_INT_SIE is bit 0.
	 */
	if (sclp.has_sief2)
		int_hwcap |= HWCAP_INT_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");

	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
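/*
 * CPNC_LINUX identifies the control program as Linux; the version
 * code (cpvc) is left at zero here.
 */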
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_memory_end();
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();
	remove_oldmem();

	setup_uv();
	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(memory_end);
	vmcp_cma_reserve();

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done
	 * afterwards.
	 */
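	/*
	 * (Presumably the reset is what brings the other CPUs into a
	 * defined state so that their registers can be collected for
	 * the dump.)
	 */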
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}