/*
 * arch/s390/kernel/early.c
 *
 * Copyright IBM Corp. 2007
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include "entry.h"

/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
#define DEFSYS_CMD_SIZE		96
#define SAVESYS_CMD_SIZE	32

char kernel_nss_name[NSS_NAME_SIZE + 1];

#ifdef CONFIG_SHARED_KERNEL
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	char *savesys_ptr;
	char upper_command_line[COMMAND_LINE_SIZE];
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(COMMAND_LINE); i++)
		upper_command_line[i] = toupper(COMMAND_LINE[i]);
	upper_command_line[i] = '\0';

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");

	if (!savesys_ptr)
		return;

	savesys_ptr += 8;	/* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;

	sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
		kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
		eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
			sinitrd_pfn, einitrd_pfn);
	}
#endif

	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
		kernel_nss_name, kernel_nss_name);

	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0) {
		kernel_nss_name[0] = '\0';
		return;
	}

	__cpcmd(savesys_cmd, NULL, 0, &response);

	if (response != strlen(savesys_cmd)) {
		kernel_nss_name[0] = '\0';
		return;
	}

	ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */

/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
}

static noinline __init void detect_machine_type(void)
{
	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;

	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);

	/* Running under z/VM ? */
	if (cpuinfo->cpu_id.version == 0xff)
		machine_flags |= 1;

	/* Running on a P/390 ? */
	if (cpuinfo->cpu_id.machine == 0x7490)
		machine_flags |= 4;
}
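/*
 * Fast memory detection, 64 bit only: diagnose 0x260 with subcode 0xc
 * (preloaded into val1) is expected to report the highest addressable
 * byte of storage in both result registers; memory_chunk[0] is then
 * sized as val0 + 1.  If the two values disagree, or the diagnose is
 * unavailable and the exception table fixup leaves ret at -ENOSYS,
 * the caller falls back to the tprot based find_memory_chunks().
 * Detection is skipped while a kernel NSS IPL is pending
 * (IPL_NSS_VALID).
 */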
#ifdef CONFIG_64BIT
static noinline __init int memory_fast_detect(void)
{
	unsigned long val0 = 0;
	unsigned long val1 = 0xc;
	int ret = -ENOSYS;

	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	asm volatile(
		"	diag	%1,%2,0x260\n"
		"0:	lhi	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");

	if (ret || val0 != val1)
		return -ENOSYS;

	memory_chunk[0].size = val0 + 1;
	return 0;
}
#else
static inline int memory_fast_detect(void)
{
	return -ENOSYS;
}
#endif
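/*
 * __tprot() probes a storage address with the TEST PROTECTION (tprot)
 * instruction and returns its condition code: 0 if fetching and
 * storing are permitted, 1 if only fetching is permitted, 2/3 if the
 * address is protected or cannot be translated.  An addressing
 * exception on non-existent storage is caught through the exception
 * table and shows up as -1, which find_memory_chunks() below treats
 * as a hole.
 */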
static inline __init unsigned long __tprot(unsigned long addr)
{
	int cc = -1;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (addr) : "cc");
	return (unsigned long)cc;
}

/* Checking memory in 128KB increments. */
#define CHUNK_INCR	(1UL << 17)
#define ADDR2G		(1UL << 31)

static noinline __init void find_memory_chunks(unsigned long memsize)
{
	unsigned long addr = 0, old_addr = 0;
	unsigned long old_cc = CHUNK_READ_WRITE;
	unsigned long cc;
	int chunk = 0;

	while (chunk < MEMORY_CHUNKS) {
		cc = __tprot(addr);
		while (cc == old_cc) {
			addr += CHUNK_INCR;
			if (memsize && addr >= memsize)
				break;
#ifndef CONFIG_64BIT
			if (addr == ADDR2G)
				break;
#endif
			cc = __tprot(addr);
		}

		if (old_addr != addr &&
		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
			memory_chunk[chunk].addr = old_addr;
			memory_chunk[chunk].size = addr - old_addr;
			memory_chunk[chunk].type = old_cc;
			chunk++;
		}

		old_addr = addr;
		old_cc = cc;

#ifndef CONFIG_64BIT
		if (addr == ADDR2G)
			break;
#endif
		/*
		 * Finish memory detection at the first hole
		 * if storage size is unknown.
		 */
		if (cc == -1UL && !memsize)
			break;
		if (memsize && addr >= memsize)
			break;
	}
}

static __init void early_pgm_check_handler(void)
{
	unsigned long addr;
	const struct exception_table_entry *fixup;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);
	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}

static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
}

/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	unsigned long long memsize;

	ipl_save_parameters();
	clear_bss_section();
	init_kernel_storage_key();
	lockdep_init();
	lockdep_off();
	detect_machine_type();
	create_kernel_nss();
	sort_main_extable();
	setup_lowcore_early();
	sclp_read_info_early();
	sclp_facilities_detect();
	memsize = sclp_memory_detect();
#ifndef CONFIG_64BIT
	/*
	 * Can't deal with more than 2G in 31 bit addressing mode, so
	 * limit the value in order to avoid strange side effects.
	 */
	if (memsize > ADDR2G)
		memsize = ADDR2G;
#endif
	if (memory_fast_detect() < 0)
		find_memory_chunks((unsigned long) memsize);
	lockdep_on();
}