// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/mem_detect.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (!noexec_disabled && test_facility(130)) {
		machine.has_nx = 1;
		__ctl_set_bit(0, 20);
	}
}

static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

/* Move the initrd up to "addr" if it would otherwise be overwritten. */
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!initrd_data.start || !initrd_data.size)
		return;
	if (addr <= initrd_data.start)
		return;
	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
	initrd_data.start = addr;
}

/*
 * Copy the decompressor's .boot.data and .boot.preserved.data sections
 * into the decompressed kernel image.
 */
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

/* Apply the relocations recorded in .rela.dyn, adding the KASLR offset. */
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}
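
/*
 * Illustration (a sketch, not part of the build): for the common
 * R_390_RELATIVE case (r_sym == 0) the loop above reduces to patching
 * each location with its link-time value plus the KASLR offset:
 *
 *	*(Elf64_Addr *)(rela->r_offset + offset) = rela->r_addend + offset;
 *
 * arch_kexec_do_relocs() generalizes this to the remaining relocation
 * types emitted by the linker.
 */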

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but may also include standby
 * (offline) memory. "ident_map_size" could be lower than the actual standby
 * or even online memory present, due to limiting factors. We should never
 * go above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

static unsigned long setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}
	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* make sure the identity map does not overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap does not overlap with the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;

	return asce_limit;
}
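
/*
 * Resulting virtual address space layout, low to high (a sketch derived
 * from the calculations above; exact boundaries depend on the kernel
 * configuration and on adjust_to_uv_max()):
 *
 *	0 .. ident_map_size			identity (1:1) mapping
 *	vmemmap .. vmemmap + vmemmap_size	struct page array
 *	VMALLOC_START .. VMALLOC_END		vmalloc area
 *	MODULES_VADDR .. MODULES_END		module area
 *	__abs_lowcore				absolute lowcore mapping
 *	__memcpy_real_area			memcpy_real() page
 *	vmax					end of usable address space
 */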

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless the size has been set by the "vmalloc=" kernel command
 * line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}
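
/*
 * Worked example (an illustration, assuming no "vmalloc=" parameter was
 * given): with an ident_map_size of 64 GiB, size becomes 64 GiB / 8 =
 * 8 GiB, which is already aligned to the 1 MiB _SEGMENT_SIZE, so
 * vmalloc_size is raised to 8 GiB unless its current value is larger.
 */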

/* Adjust all image-describing addresses by the KASLR offset. */
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
}

/* Reserve the page-aligned amode31 area just above "safe_addr". */
static unsigned long reserve_amode31(unsigned long safe_addr)
{
	__amode31_base = PAGE_ALIGN(safe_addr);
	return __amode31_base + vmlinux.amode31_size;
}

void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	unsigned long asce_limit;
	void *img;
	psw_t psw;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = reserve_amode31(safe_addr);
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	/* must follow parse_boot_command_line(), which sets noexec_disabled */
	detect_facilities();
	sanitize_prot_virt_host();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset) {
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
	}

	/*
	 * The order of the following operations is important:
	 *
	 * - handle_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by
	 *   setup_vmem() (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow handle_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section();
	handle_relocs(__kaslr_offset);
	setup_vmem(ident_map_size, asce_limit);
	copy_bootdata();

	if (__kaslr_offset) {
		/*
		 * Save the KASLR offset for early dumps, before vmcore_info
		 * is set. Mark it as odd to distinguish it from a real
		 * vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear the non-relocated kernel copy */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}
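
/*
 * Illustration (an assumption about a consumer, not kernel code; the
 * variable names are hypothetical): a dump analysis tool reading the
 * lowcore could tell the early KASLR marker stored above apart from a
 * real vmcore_info pointer by testing the low bit:
 *
 *	if (vmcore_info & 0x1UL)
 *		kaslr_offset = vmcore_info & ~0x1UL;	// early-boot marker
 *	else
 *		vmcore_info_addr = vmcore_info;		// real pointer
 *
 * The tag works because the KASLR offset is always even, so its low bit
 * is free to serve as a marker.
 */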