// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

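/*
 * Print an error message via the early SCLP console and stop the CPU in
 * disabled wait - there is no way to recover from errors this early in
 * the boot process.
 */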
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

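/*
 * Detect dynamically tested machine facilities: EDAT-1 (facility 8,
 * which is also enabled in control register 0 when present), EDAT-2
 * (facility 78) and instruction-execution protection (facility 130).
 */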
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}

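/*
 * Set the program parameter via the LPP instruction if the
 * load-program-parameter facility (40) is installed; the value can
 * then be used, e.g. by CPU measurement sampling, to attribute
 * samples to this system.
 */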
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
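/*
 * Without a compressed payload to unpack, the first address that is
 * safe to use is the end of the kernel image at its default load
 * address, including its .bss.
 */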
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

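/*
 * Move the initrd to a freshly allocated location if any part of it
 * lies outside of the [min, max) range.
 */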
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

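/*
 * Copy the .boot.data and .boot.preserved.data sections of the
 * decompressor into the corresponding sections of the decompressed
 * kernel image, after verifying that the section sizes match.
 */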
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

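/*
 * Apply the relocations from the .rela.dyn section to the decompressed
 * kernel image, so that it can run at an address other than the one it
 * was linked at (i.e. the KASLR-randomized position).
 */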
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It is not necessarily all online memory, but may also include
 * standby (offline) memory. "ident_map_size" may be lower than the actual
 * standby or even online memory present, due to limiting factors. We should
 * never go above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

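/*
 * Decide on the kernel virtual address space layout: choose between
 * three and four region table levels, then place the memcpy real area,
 * absolute lowcore, modules, vmalloc and vmemmap areas top-down below
 * the resulting address space limit. Returns the chosen ASCE limit.
 */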
static unsigned long setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
		MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
	vsize = size_add(vsize, vmalloc_size);
	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force the modules and vmalloc areas below the ultravisor
	 * secure storage limit, so that any vmalloc allocation
	 * we do can be used to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow vmalloc area to occupy up to about half of the remaining virtual space */
	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* maximum mappable address as seen by arch_get_mappable_range() */
	max_mappable = vmemmap_start;
	/* make sure the identity map does not overlap with the vmemmap array */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure the vmemmap array does not overlap with the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel
 * and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long vmlinux_lma)
{
	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to one eighth of the (potential) physical
 * memory size, unless the size has been set via kernel command line
 * parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

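/*
 * Shift all address fields of the vmlinux info structure by the
 * KASLR offset.
 */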
static void offset_vmlinux_info(unsigned long offset)
{
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

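/*
 * Main C entry point of the decompressor, entered from the early startup
 * assembly code. Sets up the physical and virtual memory layout,
 * optionally randomizes the kernel position (KASLR), decompresses and
 * relocates the kernel, and finally jumps to its entry point.
 */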
void startup_kernel(void)
{
	unsigned long max_physmem_end;
	unsigned long vmlinux_lma = 0;
	unsigned long amode31_lma = 0;
	unsigned long asce_limit;
	unsigned long safe_addr;
	void *img;
	psw_t psw;

	setup_lpp();
	safe_addr = mem_safe_offset();

	/*
	 * Reserve the decompressor memory together with the decompression heap,
	 * buffer and memory which might be occupied by the uncompressed kernel
	 * at the default 1 MB position (if KASLR is disabled or has failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();
	/* got final ident_map_size, physmem allocations can be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	if (kaslr_enabled()) {
		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
						     THREAD_SIZE, vmlinux.default_lma,
						     ident_map_size);
		if (vmlinux_lma) {
			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}
	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset) {
		img = (void *)vmlinux.default_lma;
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
		memset(img, 0, vmlinux.image_size);
	}

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
	if (kaslr_enabled())
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - handle_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by
	 *   setup_vmem() (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow handle_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(vmlinux_lma);
	handle_relocs(__kaslr_offset);
	setup_vmem(asce_limit);
	copy_bootdata();

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as odd (set bit 0) to distinguish it from a real
	 * vmcore_info pointer.
	 */
	S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}