// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel is
 * relocated above 2 GB, because it has to use 31-bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = __pa(&_samode31);
unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2 GB, which is handled by the
 * AMODE31 sections.
 * Because the AMODE31 sections are relocated below 2 GB at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with the new addresses after the relocation. The control registers
 * are first set up in head64.S and then updated again after the AMODE31
 * relocation. We must access the relevant AMODE31 tables indirectly via
 * pointers placed in the .amode31.refs linker section. Those pointers get
 * updated automatically during AMODE31 relocation and always contain a
 * valid address within the AMODE31 sections.
 */
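
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the indirection described above means a table is never
 * addressed directly, only through its __amode31_ref pointer, e.g.
 *
 *	static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 *	...
 *	cr2.ducto = (unsigned long)__ctl_duct >> 6;	(see setup_cr())
 *
 * relocate_amode31_section() adds the relocation offset to every pointer
 * between _start_amode31_refs and _end_amode31_refs, so such loads always
 * see the relocated tables.
 */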

static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;

int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through, which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;
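
/*
 * Illustrative sketch (an assumption, not code from this file): PCI
 * mapping code that wants write-back semantics would OR the mask into
 * the physical address it maps, along the lines of
 *
 *	addr |= mio_wb_bit_mask;	(no-op, i.e. write through, while 0)
 */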

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);
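
/*
 * Usage sketch (editorial addition): since simple_strtoul() is called
 * with base 0 above, both decimal and hex are accepted, e.g. booting
 * with
 *
 *	condev=0x0009
 *
 * sets console_devno to 9 and resets console_irq to -1.
 */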

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
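
/*
 * Usage sketch (editorial addition): with CONFIG_TN3270_CONSOLE built
 * in, booting with
 *
 *	conmode=3270
 *
 * selects the 3270 console; "hwc" and "sclp" both select the SCLP
 * console.
 */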

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */
315 
316 void machine_restart(char *command)
317 {
318 	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
319 		/*
320 		 * Only unblank the console if we are called in enabled
321 		 * context or a bust_spinlocks cleared the way for us.
322 		 */
323 		console_unblank();
324 	_machine_restart(command);
325 }
326 
327 void machine_halt(void)
328 {
329 	if (!in_interrupt() || oops_in_progress)
330 		/*
331 		 * Only unblank the console if we are called in enabled
332 		 * context or a bust_spinlocks cleared the way for us.
333 		 */
334 		console_unblank();
335 	_machine_halt();
336 }
337 
338 void machine_power_off(void)
339 {
340 	if (!in_interrupt() || oops_in_progress)
341 		/*
342 		 * Only unblank the console if we are called in enabled
343 		 * context or a bust_spinlocks cleared the way for us.
344 		 */
345 		console_unblank();
346 	_machine_power_off();
347 }
348 
349 /*
350  * Dummy power off function.
351  */
352 void (*pm_power_off)(void) = machine_power_off;
353 EXPORT_SYMBOL_GPL(pm_power_off);
354 
355 void *restart_stack;
356 
357 unsigned long stack_alloc(void)
358 {
359 #ifdef CONFIG_VMAP_STACK
360 	void *ret;
361 
362 	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
363 			     NUMA_NO_NODE, __builtin_return_address(0));
364 	kmemleak_not_leak(ret);
365 	return (unsigned long)ret;
366 #else
367 	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
368 #endif
369 }
370 
371 void stack_free(unsigned long stack)
372 {
373 #ifdef CONFIG_VMAP_STACK
374 	vfree((void *) stack);
375 #else
376 	free_pages(stack, THREAD_SIZE_ORDER);
377 #endif
378 }
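
/*
 * Usage sketch (editorial addition): callers pair the two helpers above
 * and treat 0 as allocation failure, e.g.
 *
 *	unsigned long stack = stack_alloc();
 *
 *	if (!stack)
 *		return -ENOMEM;
 *	...
 *	stack_free(stack);
 */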

int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	call_on_stack_noreturn(rest_init, stack);
}

static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	unsigned long mcck_stack;
	struct lowcore *lc;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	nmi_alloc_boot_cpu(lc);
	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack, which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;

	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!mcck_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

static void __init setup_lowcore_dat_on(void)
{
	struct lowcore *lc = lowcore_ptr[0];

	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
	__ctl_set_bit(0, 28);
	mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
	mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
	memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
			sizeof(S390_lowcore.cregs_save_area));
}

static struct resource code_resource = {
	.name  = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range, while in resources, end points to the last byte
		 * in the range (hence the "end - 1" below).
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

static void __init setup_memory_end(void)
{
	memblock_remove(ident_map_size, ULONG_MAX);
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area above identity mapping is protected
 */
static void __init reserve_above_ident_map(void)
{
	memblock_reserve(ident_map_size, ULONG_MAX);
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_phys_alloc_range(crash_size,
						       KEXEC_CRASH_MEM_ALIGN,
						       low, high);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb)) {
		memblock_free(crash_base, crash_size);
		return;
	}

	if (!oldmem_data.start && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}
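
/*
 * Usage sketch (editorial addition): the crashkernel= syntax parsed
 * above covers, among others,
 *
 *	crashkernel=256M	(the kernel picks a suitable base)
 *	crashkernel=256M@1G	(fixed base; rejected as "crash_base too
 *				 low" if it lies below sclp.hsa_size or
 *				 below the requested size)
 *
 * Both base and size are rounded up to KEXEC_CRASH_MEM_ALIGN.
 */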

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_data.start || !initrd_data.size)
		return;
	initrd_start = initrd_data.start;
	initrd_end = initrd_start + initrd_data.size;
	memblock_reserve(initrd_data.start, initrd_data.size);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_data.start && initrd_data.size &&
	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(initrd_data.start, initrd_data.size);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end));

	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
}

static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

static void __init relocate_amode31_section(void)
{
	unsigned long amode31_addr, amode31_size;
	long amode31_offset;
	long *ptr;

	/* Allocate a new AMODE31 capable memory region */
	amode31_size = __eamode31 - __samode31;
	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
	amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
	if (!amode31_addr)
		panic("Failed to allocate memory for AMODE31 section\n");
	amode31_offset = amode31_addr - __samode31;

	/* Move original AMODE31 section to the new one */
	memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset((void *)__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}

/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	__ctl_store(cr2.val, 2, 2);
	__ctl_store(cr5.val, 5, 5);
	__ctl_store(cr15.val, 15, 15);
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	__ctl_load(cr2.val, 2, 2);
	__ctl_load(cr5.val, 5, 5);
	__ctl_load(cr15.val, 15, 15);
}
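
/*
 * Note on the shifts above (editorial addition): the DUCT and PASTE
 * origins in CR2/CR5 are addresses with 64-byte granularity, hence the
 * ">> 6" (the tables are declared __aligned(64) for this reason); the
 * linkage stack entry address in CR15 has 8-byte granularity, hence
 * the ">> 3". E.g. a DUCT at 0x1000 yields ducto = 0x1000 >> 6 = 0x40.
 */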

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");

	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}
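
/*
 * Worked example (editorial addition; the register counts and sizes are
 * an assumption, not stated in this file): with __NUM_VXRS == 32 vector
 * registers of 16 bytes and __NUM_FPRS == 16 floating point registers
 * of 8 bytes, a machine without the vector facility shrinks task_struct
 * by 32 * 16 - 16 * 8 = 384 bytes.
 */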

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has already been set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_above_ident_map();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();

	relocate_amode31_section();
	setup_cr();

	setup_uv();
	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (MACHINE_HAS_EDAT2)
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}
1075