xref: /openbmc/linux/arch/s390/boot/startup.c (revision 94fd5220)
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

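/*
 * Probe the facility bits this early boot code cares about: facility 8
 * (EDAT-1) and facility 78 (EDAT-2) select the enhanced-DAT page table
 * features, facility 130 is the instruction-execution-protection
 * facility backing kernel noexec support. The control register 0 bits
 * set below enable the corresponding features.
 */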
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (!noexec_disabled && test_facility(130)) {
		machine.has_nx = 1;
		__ctl_set_bit(0, 20);
	}
}

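/*
 * Load the program parameter (facility 40, load-program-parameter)
 * with LPP_MAGIC; the program parameter is used to tag CPU measurement
 * facility sampling data.
 */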
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

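/*
 * With an uncompressed kernel there is no decompressor work area to
 * protect; memory is safe to use directly past the loaded image,
 * including its .bss.
 */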
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

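/*
 * Move the initrd to a freshly allocated spot if it does not already
 * lie completely within the [min, max) range of usable memory.
 */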
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

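/*
 * Copy the .boot.data and .boot.preserved.data sections, which were
 * filled in by the decompressor, into the decompressed kernel image.
 */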
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

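/*
 * Apply the R_390_* relocations from the .rela.dyn section so that the
 * image works at its (possibly KASLR-randomized) load address.
 */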
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory, but may also include standby
 * (offline) memory. "ident_map_size" can be lower than the standby or even
 * the online memory actually present, due to limiting factors. We must never
 * go above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

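/*
 * Compute the virtual address space layout: pick a 3- or 4-level page
 * table setup, then carve out, from the top of the address space
 * downwards, the memcpy_real area, the absolute lowcore mapping, the
 * module area, the vmalloc area and the vmemmap array. Returns the
 * resulting address space limit (asce_limit).
 */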
static unsigned long setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
		    _REGION2_SIZE) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}
	/*
	 * Force the modules and vmalloc areas below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* maximum mappable address as seen by arch_get_mappable_range() */
	max_mappable = vmemmap_start;
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long vmlinux_lma)
{
	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to one eighth of the (potential) physical
 * memory size, unless the size has been set on the kernel command line.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

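/*
 * Shift all position-dependent addresses recorded in the vmlinux info
 * block by the KASLR offset.
 */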
static void offset_vmlinux_info(unsigned long offset)
{
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

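/*
 * Main C entry point of the boot phase: establish the physical and
 * virtual memory layout, apply KASLR, place the decompressed kernel
 * and its supporting data, and finally jump to the kernel entry point.
 */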
void startup_kernel(void)
{
	unsigned long max_physmem_end;
	unsigned long vmlinux_lma = 0;
	unsigned long amode31_lma = 0;
	unsigned long asce_limit;
	unsigned long safe_addr;
	void *img;
	psw_t psw;

	setup_lpp();
	safe_addr = mem_safe_offset();
	/*
	 * Reserve the decompressor memory together with the decompression heap
	 * and buffer, plus the memory which might be occupied by the
	 * uncompressed kernel at the default 1MB position (if KASLR is off or
	 * has failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();
	/* got the final ident_map_size, physmem allocations can be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

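	/*
	 * With KASLR enabled, try to place the kernel image at a random
	 * address between its default LMA and the identity-map limit.
	 */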
	if (kaslr_enabled()) {
		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
						     THREAD_SIZE, vmlinux.default_lma,
						     ident_map_size);
		if (vmlinux_lma) {
			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}
	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);

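	/*
	 * Either decompress the kernel to its final location, or, for an
	 * uncompressed kernel that got a KASLR address, move it there and
	 * wipe the stale copy at the default location.
	 */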
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset) {
		img = (void *)vmlinux.default_lma;
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
		memset(img, 0, vmlinux.image_size);
	}

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
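	/*
	 * The .amode31 section has to remain addressable in 31-bit mode and
	 * is therefore placed below 2GB, at a random position if KASLR is
	 * enabled, otherwise directly below the default kernel LMA.
	 */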
	if (kaslr_enabled())
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - handle_relocs() must follow clear_bss_section() to establish
	 *   the static memory references to data in .bss that are used by
	 *   setup_vmem() (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow handle_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(vmlinux_lma);
	handle_relocs(__kaslr_offset);
	setup_vmem(asce_limit);
	copy_bootdata();

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as an odd value to distinguish it from a real vmcore_info
	 * pointer.
	 */
	S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}
380