// SPDX-License-Identifier: GPL-2.0
/* xref: /openbmc/linux/arch/s390/boot/startup.c (revision 083a7fba) */
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

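/*
 * Print an error message via the early SCLP console and stop the
 * machine in disabled wait.
 */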
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

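/*
 * Set the Load Program Parameter (LPP) so that hardware sampling data
 * can be attributed to this Linux instance. Facility 40 is, to our
 * understanding, the load-program-parameter facility; the lpp
 * instruction may only be issued when it is installed.
 */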
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

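/*
 * First address behind the loaded kernel image (including its BSS) at
 * which it is safe to place movable boot data. For compressed kernels
 * the corresponding helper comes from the decompressor code instead
 * (see compressed/decompressor.h).
 */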
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

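/*
 * If the computed safe address lies above the start of the initrd,
 * later boot steps would overwrite the initrd; move it up to the safe
 * address first.
 */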
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!initrd_data.start || !initrd_data.size)
		return;
	if (addr <= initrd_data.start)
		return;
	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
	initrd_data.start = addr;
}

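/*
 * Copy the .boot.data and .boot.preserved.data sections collected by
 * the decompressor to their final locations inside the decompressed
 * kernel image.
 */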
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

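/*
 * Apply the relocations recorded in .rela.dyn to the (possibly moved)
 * kernel image. Worked example for an R_390_RELATIVE entry (symbol
 * index 0): the value stored at r_offset + offset becomes
 * r_addend + offset, i.e. the link-time value shifted by the KASLR
 * offset.
 */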
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It need not consist of online memory only; it may also include
 * standby (offline) memory. Due to the limiting factors below,
 * "ident_map_size" could be lower than the standby or even the online memory
 * actually present. We should never go above this limit. It is the size of
 * our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

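/*
 * Simplified sketch of the virtual layout this function computes, top
 * of the address space downwards (KASAN and the ultravisor secure
 * storage limit can lower MODULES_END and shrink the vmalloc area):
 *
 *	vmax (_REGION1_SIZE or _REGION2_SIZE)
 *	-------------------------------------- MODULES_END
 *	  module area (MODULES_LEN)
 *	-------------------------------------- MODULES_VADDR == VMALLOC_END
 *	  vmalloc area
 *	-------------------------------------- VMALLOC_START
 *	  vmemmap (one struct page per physical page)
 *	-------------------------------------- vmemmap_off
 *	  identity mapping of physical memory
 *	-------------------------------------- 0
 */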
static void setup_kernel_memory_layout(void)
{
	bool vmalloc_size_verified = false;
	unsigned long vmemmap_off;
	unsigned long vspace_left;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
		vmax = _REGION1_SIZE;
	else
		vmax = _REGION2_SIZE;

	/* keep vmemmap_off aligned to a top level region table entry */
	rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
	MODULES_END = vmax;
	if (is_prot_virt_host()) {
		/*
		 * Force the modules and vmalloc areas below the ultravisor
		 * secure storage limit, so that any vmalloc allocation
		 * we do can be used to back secure guest storage.
		 */
		adjust_to_uv_max(&MODULES_END);
	}

#ifdef CONFIG_KASAN
	if (MODULES_END < vmax) {
		/* force vmalloc and modules below kasan shadow */
		MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
	} else {
		/*
		 * leave vmalloc and modules above kasan shadow but make
		 * sure they don't overlap with it
		 */
		vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
		vmalloc_size_verified = true;
		vspace_left = KASAN_SHADOW_START;
	}
#endif
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	if (vmalloc_size_verified) {
		VMALLOC_START = VMALLOC_END - vmalloc_size;
	} else {
		vmemmap_off = round_up(ident_map_size, rte_size);

		if (vmemmap_off + vmemmap_size > VMALLOC_END ||
		    vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
			/*
			 * allow the vmalloc area to occupy up to half of
			 * the remaining virtual space
			 */
			vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
		}
		VMALLOC_START = VMALLOC_END - vmalloc_size;
		vspace_left = VMALLOC_START;
	}

	pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
	/* keep vmemmap leftmost, starting at a fresh region table entry */
	vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
	/* take care that the identity map is lower than vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_off);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_off;
}

/*
 * This function clears the BSS section of the decompressed Linux
 * kernel, NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to one eighth of the (potential) physical
 * memory size, unless the size has been set on the kernel command line.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

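/* Shift all vmlinux_info addresses by the given (KASLR) offset. */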
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
}

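/*
 * Reserve room behind the current safe address for the relocated
 * .amode31 section, which holds code and data that must remain
 * addressable with 31 bits.
 */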
static unsigned long reserve_amode31(unsigned long safe_addr)
{
	__amode31_base = PAGE_ALIGN(safe_addr);
	/* advance past the whole reserved area, not the unaligned safe_addr */
	return __amode31_base + vmlinux.amode31_size;
}

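/*
 * Entry point of the decompressor's C code: gather boot data, set up
 * the memory layout, decompress (and with KASLR move) the kernel,
 * apply relocations and finally branch to the decompressed image.
 */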
void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = reserve_amode31(safe_addr);
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	sanitize_prot_virt_host();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();
	setup_kernel_memory_layout();

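	/* With KASLR enabled, pick a random base and shift all image addresses. */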
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}

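	/*
	 * Place the kernel at its (possibly randomized) load address:
	 * decompress straight to it, or, for an uncompressed kernel with
	 * KASLR, copy the image from its link-time location.
	 */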
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	clear_bss_section();
	copy_bootdata();
	if (IS_ENABLED(CONFIG_RELOCATABLE))
		handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save the KASLR offset for early dumps, before vmcore_info
		 * is set. Mark it with an odd value to distinguish it from
		 * a real vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear the non-relocated kernel image */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}