// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include "entry.h"

static void __init setup_boot_command_line(void);

/*
 * Get the TOD clock running.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);

	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Running under KVM ("\xd2\xe5\xd4" is EBCDIC for "KVM")? If not we assume z/VM */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

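/*
 * Example (illustrative values only): setup_arch_string() builds its
 * strings from fixed-width, blank-padded STSI fields, and strim_all()
 * squeezes them down, e.g.
 *	"IBM             2964 NE1             " -> "IBM 2964 NE1"
 */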
/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s));
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}

static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
	memcpy(S390_lowcore.alt_stfle_fac_list,
	       S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}

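/*
 * detect_diag9c() and detect_diag44() below probe for a diagnose by
 * presetting rc to -EOPNOTSUPP and clearing it only if the diagnose
 * executes; the EX_TABLE entry makes a program check resume past the
 * "la" that would clear rc, so an unsupported diagnose leaves the
 * error value in place.
 */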
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	asm volatile(
		"	diag	0,0,0x44\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}

static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(40))
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
	if (test_facility(50) && test_facility(73)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
		/* Enable signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
}

static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

static int __init noexec_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && !enabled) {
		/* Disable no-execute support */
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
		__ctl_clear_bit(0, 20);
	}
	return rc;
}
early_param("noexec", noexec_setup);

static int __init cad_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && enabled && test_facility(128))
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return rc;
}
early_param("cad", cad_setup);

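/*
 * Early, byte-wise copy helper. The program check new PSW is temporarily
 * pointed at the label behind the copy loop, so any program check raised
 * by the mvc (for example when a source page turns out to be inaccessible
 * this early in boot) just ends the copy and lets the caller continue,
 * instead of going through the normal program check handling.
 */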
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[addr],1f\n"
		"	stg	%[addr],%[psw_pgm_addr]\n"
		"0:	mvc	0(1,%[dst]),0(%[src])\n"
		"	agr	%[dst],%[incr]\n"
		"	agr	%[src],%[incr]\n"
		"	brctg	%[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}

static __init noinline void ipl_save_parameters(void)
{
	void *src, *dst;

	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
	dst = (void *) IPL_PARMBLOCK_ORIGIN;
	memmove_early(dst, src, PAGE_SIZE);
	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}

static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in the case of IPL from the VM reader we make sure
	 * there is a gap of 4MB between the end of the kernel and the
	 * start of the initrd. That way we can also be sure that saving
	 * an NSS will succeed, which, however, only requires different
	 * segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}

/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len;	/* '\0' character position */
	parm = boot_command_line + len + 1;	/* append right after '\0' */

	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		/* A leading '=' replaces the command line instead of appending. */
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' ';	/* replace '\0' with space */
	}
}

static inline int has_ebcdic_char(const char *str)
{
	int i;

	for (i = 0; str[i]; i++)
		if (str[i] & 0x80)
			return 1;
	return 0;
}

static void __init setup_boot_command_line(void)
{
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}

void __init startup_init(void)
{
	reset_tod_clock();
	ipl_save_parameters();
	rescue_initrd();
	clear_bss_section();
	ipl_verify_parameters();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_update_parameters();
	setup_boot_command_line();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}