/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/mmu_context.h>

extern void * __rd_start, * __rd_end;

/*
 * Machine setup.
 */

/*
 * Initialize loops_per_jiffy to 10000000 (1000 MIPS).
 * This value is used at the very early stage of serial setup;
 * erring on the high side is harmless.
 */
struct sh_cpuinfo boot_cpu_data = { CPU_SH_NONE, 10000000, };
#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

#if defined(CONFIG_SH_UNKNOWN)
struct sh_machine_vector sh_mv;
#endif

extern int root_mountflags;

#define MV_NAME_SIZE 32

static struct sh_machine_vector * __init get_mv_byname(const char *name);

/*
 * This is set up by the setup-routine at boot-time
 */
#define PARAM ((unsigned char *)empty_zero_page)

#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008))
#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c))
#define INITRD_START (*(unsigned long *) (PARAM+0x010))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014))
/* ... */
#define COMMAND_LINE ((char *) (PARAM+0x100))

#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

unsigned long memory_start, memory_end;

static inline void parse_cmdline(char **cmdline_p, char mv_name[MV_NAME_SIZE],
				 struct sh_machine_vector **mvp,
				 unsigned long *mv_io_base)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';

	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
	memory_end = memory_start + __MEMORY_SIZE;

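	/*
	 * Scan the raw command line one character at a time.  The two
	 * options handled here, "mem=<size>" and "sh_mv=<name>[,<io_base>]",
	 * are stripped from the copy; everything else is copied into
	 * command_line, which is handed back through cmdline_p for the
	 * rest of the kernel to parse.
	 */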
	for (;;) {
		/*
		 * "mem=XXX[kKmM]" defines a size of memory.
		 */
		if (c == ' ' && !memcmp(from, "mem=", 4)) {
			if (to != command_line)
				to--;
			{
				unsigned long mem_size;

				mem_size = memparse(from+4, &from);
				memory_end = memory_start + mem_size;
			}
		}

		if (c == ' ' && !memcmp(from, "sh_mv=", 6)) {
			char *mv_end;
			char *mv_comma;
			int mv_len;
			if (to != command_line)
				to--;
			from += 6;
			mv_end = strchr(from, ' ');
			if (mv_end == NULL)
				mv_end = from + strlen(from);

			mv_comma = strchr(from, ',');
			if ((mv_comma != NULL) && (mv_comma < mv_end)) {
				int ints[3];
				get_options(mv_comma+1, ARRAY_SIZE(ints), ints);
				*mv_io_base = ints[1];
				mv_len = mv_comma - from;
			} else {
				mv_len = mv_end - from;
			}
			if (mv_len > (MV_NAME_SIZE-1))
				mv_len = MV_NAME_SIZE-1;
			memcpy(mv_name, from, mv_len);
			mv_name[mv_len] = '\0';
			from = mv_end;

			*mvp = get_mv_byname(mv_name);
		}

		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

static int __init sh_mv_setup(char **cmdline_p)
{
#ifdef CONFIG_SH_UNKNOWN
	extern struct sh_machine_vector mv_unknown;
#endif
	struct sh_machine_vector *mv = NULL;
	char mv_name[MV_NAME_SIZE] = "";
	unsigned long mv_io_base = 0;

	parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base);

#ifdef CONFIG_SH_UNKNOWN
	if (mv == NULL) {
		mv = &mv_unknown;
		if (*mv_name != '\0') {
			printk("Warning: Unsupported machine %s, using unknown\n",
			       mv_name);
		}
	}
	sh_mv = *mv;
#endif

	/*
	 * Manually walk the machine vector and fill in any entries the
	 * board has not supplied, falling back to the generic
	 * implementations.
	 */
#define mv_set(elem) do { \
	if (!sh_mv.mv_##elem) \
		sh_mv.mv_##elem = generic_##elem; \
} while (0)

	mv_set(inb);	mv_set(inw);	mv_set(inl);
	mv_set(outb);	mv_set(outw);	mv_set(outl);

	mv_set(inb_p);	mv_set(inw_p);	mv_set(inl_p);
	mv_set(outb_p);	mv_set(outw_p);	mv_set(outl_p);

	mv_set(insb);	mv_set(insw);	mv_set(insl);
	mv_set(outsb);	mv_set(outsw);	mv_set(outsl);

	mv_set(readb);	mv_set(readw);	mv_set(readl);
	mv_set(writeb);	mv_set(writew);	mv_set(writel);

	mv_set(ioport_map);
	mv_set(ioport_unmap);
	mv_set(irq_demux);

#ifdef CONFIG_SH_UNKNOWN
	__set_io_port_base(mv_io_base);
#endif

	if (!sh_mv.mv_nr_irqs)
		sh_mv.mv_nr_irqs = NR_IRQS;

	return 0;
}

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(void)
{
	unsigned long curr_pfn, last_pfn, pages;

	/*
	 * We are rounding up the start address of usable memory:
	 */
	curr_pfn = PFN_UP(__MEMORY_START);

	/*
	 * ... and at the end of the usable range downwards:
	 */
	last_pfn = PFN_DOWN(__pa(memory_end));

	if (last_pfn > max_low_pfn)
		last_pfn = max_low_pfn;

	pages = last_pfn - curr_pfn;
	free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}

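/*
 * Bring up the bootmem allocator: initialize the bitmap that tracks
 * low memory, give back the freely usable pages, then reserve the
 * ranges that must not be handed out (the kernel image and the bitmap
 * itself, physical page zero, any ramdisk image, and any crash kernel
 * region).
 */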
void __init setup_bootmem_allocator(unsigned long start_pfn)
{
	unsigned long bootmap_size;

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is up) must be done via alloc_bootmem().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 min_low_pfn, max_low_pfn);

	register_bootmem_low_pages();

	node_set_online(0);

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(__MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * Reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(__MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
					__MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end -
			      (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START + __MEMORY_START,
					INITRD_SIZE);
			initrd_start = INITRD_START + PAGE_OFFSET +
					__MEMORY_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       INITRD_START + INITRD_SIZE,
			       max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1);
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static void __init setup_memory(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));
	setup_bootmem_allocator(start_pfn);
}
#else
extern void __init setup_memory(void);
#endif

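/*
 * setup_arch() is the architecture-specific boot-time entry point:
 * it enables the MMU, picks up the boot parameters stashed in the
 * zero page, establishes the kernel memory layout, parses the command
 * line and completes the machine vector, brings up the bootmem
 * allocator and paging, and finally hands off to the board's own
 * mv_setup() hook.
 */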
void __init setup_arch(char **cmdline_p)
{
	enable_mmu();

#ifdef CONFIG_CMDLINE_BOOL
	strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;

	parse_early_param();

	sh_mv_setup(cmdline_p);

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);
	setup_memory();
	paging_init();
	sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	if (likely(sh_mv.mv_setup))
		sh_mv.mv_setup(cmdline_p);
}

struct sh_machine_vector * __init get_mv_byname(const char *name)
{
	extern long __machvec_start, __machvec_end;
	struct sh_machine_vector *all_vecs =
		(struct sh_machine_vector *)&__machvec_start;

	int i, n = ((unsigned long)&__machvec_end
		    - (unsigned long)&__machvec_start)/
		sizeof(struct sh_machine_vector);

	for (i = 0; i < n; ++i) {
		struct sh_machine_vector *mv = &all_vecs[i];
		if (mv == NULL)
			continue;
		if (strcasecmp(name, get_system_type()) == 0)
			return mv;
	}
	return NULL;
}

static struct cpu cpu[NR_CPUS];

static int __init topology_init(void)
{
	int cpu_id;

	for_each_possible_cpu(cpu_id)
		register_cpu(&cpu[cpu_id], cpu_id);

	return 0;
}

subsys_initcall(topology_init);

static const char *cpu_name[] = {
	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
	[CPU_SH7604]	= "SH7604",	[CPU_SH7300]	= "SH7300",
	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
	[CPU_SH7709]	= "SH7709",	[CPU_SH7710]	= "SH7710",
	[CPU_SH7712]	= "SH7712",
	[CPU_SH7729]	= "SH7729",	[CPU_SH7750]	= "SH7750",
	[CPU_SH7750S]	= "SH7750S",	[CPU_SH7750R]	= "SH7750R",
	[CPU_SH7751]	= "SH7751",	[CPU_SH7751R]	= "SH7751R",
	[CPU_SH7760]	= "SH7760",	[CPU_SH73180]	= "SH73180",
	[CPU_ST40RA]	= "ST40RA",	[CPU_ST40GX1]	= "ST40GX1",
	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
	[CPU_SH7770]	= "SH7770",	[CPU_SH7780]	= "SH7780",
	[CPU_SH7781]	= "SH7781",	[CPU_SH7343]	= "SH7343",
	[CPU_SH7785]	= "SH7785",	[CPU_SH7722]	= "SH7722",
	[CPU_SH_NONE]	= "Unknown"
};

const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
	return cpu_name[c->type];
}

#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
	"none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
	"ptea", "llsc", "l2", "op32", NULL
};

static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
	unsigned long i;

	seq_printf(m, "cpu flags\t:");

	if (!c->flags) {
		seq_printf(m, " %s\n", cpu_flags[0]);
		return;
	}

	for (i = 0; cpu_flags[i]; i++)
		if ((c->flags & (1 << i)))
			seq_printf(m, " %s", cpu_flags[i+1]);

	seq_printf(m, "\n");
}

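/*
 * Report a cache as "<size>KiB (<ways>-way)".  The size follows from
 * the geometry: for example, 2 ways x 256 sets x 32-byte lines gives
 * 2 * 256 * 32 = 16384 bytes, printed as "16KiB (2-way)".
 */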
static void show_cacheinfo(struct seq_file *m, const char *type,
			   struct cache_info info)
{
	unsigned int cache_size;

	cache_size = info.ways * info.sets * info.linesz;

	seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
		   type, cache_size >> 10, info.ways);
}

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct sh_cpuinfo *c = v;
	unsigned int cpu = c - cpu_data;

	if (!cpu_online(cpu))
		return 0;

	if (cpu == 0)
		seq_printf(m, "machine\t\t: %s\n", get_system_type());

	seq_printf(m, "processor\t: %d\n", cpu);
	seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
	seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));

	show_cpuflags(m, c);

	seq_printf(m, "cache type\t: ");

	/*
	 * Check what type of cache we have: we support both the unified
	 * cache found on the SH-2 and SH-3, and the Harvard-style split
	 * cache found on the SH-4.
	 */
	if (c->icache.flags & SH_CACHE_COMBINED) {
		seq_printf(m, "unified\n");
		show_cacheinfo(m, "cache", c->icache);
	} else {
		seq_printf(m, "split (harvard)\n");
		show_cacheinfo(m, "icache", c->icache);
		show_cacheinfo(m, "dcache", c->dcache);
	}

	/* Optional secondary cache */
	if (c->flags & CPU_HAS_L2_CACHE)
		show_cacheinfo(m, "scache", c->scache);

	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */