xref: /openbmc/linux/arch/mips/kernel/setup.c (revision 110e6f26)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Despite its name, this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
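
/*
 * Illustrative example (not part of the original file): because
 * overlapping same-type regions are merged above, two calls such as
 *
 *	add_memory_region(0x00000000, SZ_16M, BOOT_MEM_RAM);
 *	add_memory_region(SZ_8M, SZ_16M, BOOT_MEM_RAM);
 *
 * leave a single boot_mem_map entry: addr = 0x0, size = 24MB (the top
 * is max(16M, 8M + 16M) = 24M, the base is min(0, 8M) = 0).
 */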

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
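
/*
 * Sketch of a hypothetical caller (assumed, not from this file): a board
 * whose DRAM size is unknown at build time can probe it from its
 * plat_mem_setup(), relying on the address-wrap trick above. detect_magic
 * sits in the kernel image, which typically loads near the start of RAM;
 * on boards where an undersized DRAM aliases back onto itself, the magic
 * value reappears at the first power-of-two offset equal to the real RAM
 * size, which is where the memcmp() loop stops:
 *
 *	detect_memory_region(0, SZ_4M, SZ_256M);
 */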

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}
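
/*
 * Example output (illustrative; on a 32-bit kernel field == 8): a single
 * 16MB usable region at physical address 0 would print as
 *
 *	 memory: 01000000 @ 00000000 (usable)
 */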

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
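
/*
 * Example (illustrative): a bootloader that cannot patch initrd_start and
 * initrd_end in the kernel image can describe the initrd on the command
 * line instead, e.g.
 *
 *	rd_start=0x85000000 rd_size=8M
 *
 * rd_start records the virtual start address and offsets initrd_end by
 * the same amount; rd_size then adds the byte count to initrd_end.
 */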

/* It returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board-specific code or the command line parser should have
	 * already set up initrd_start and initrd_end. In that case,
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware can't guess
	 * whether it needs to pass them as 64-bit values when the kernel
	 * has been built pure 32-bit. We also need to switch from KSEG0
	 * to XKPHYS addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
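
/*
 * Worked example (illustrative, 64-bit kernel): if a 32-bit-only
 * bootloader passed initrd_start = 0x84000000 (a KSEG0 address that
 * rd_start_early() has already sign-extended to CKSEG0), __pa() yields
 * 0x4000000 and __va() rewrites the pointer into XKPHYS, after which
 * __pa() is safe on it everywhere else in the kernel.
 */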

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially; we rely on the end of the kernel
	 * code as our memory range starting point. Once bootmem is
	 * initialized we will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages; the number of pages in
	 * the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			continue;
		}

		/*
		 * We round the start address of usable memory up and the
		 * end of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}
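
/*
 * Overview of bootmem_init()'s three passes over boot_mem_map: the first
 * computes min_low_pfn/max_low_pfn and picks mapstart, the first usable
 * pfn above the kernel image and initrd; the second feeds the RAM ranges
 * to memblock; the third frees the fully available low pages to bootmem
 * and reports them via memory_present().
 */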

#endif	/* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel, but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 * get by without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was renamed to plat_mem_setup and a second platform
 * initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we blow away any automatically
	 * generated memory map and start from scratch.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);
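
/*
 * Example (illustrative): "mem=64M@0x10000000" replaces the firmware-
 * provided map with a single 64MB BOOT_MEM_RAM region at physical
 * 0x10000000; a second "mem=" on the same command line appends another
 * region, since usermem stays set after the first one.
 */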

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the ELF core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}
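
/*
 * Example (illustrative): with "crashkernel=64M@128M" on the command
 * line, crashk_res covers 0x8000000-0xbffffff; arch_mem_init() below
 * reserves that range from bootmem, and request_crashkernel() later
 * claims it from the matching "System RAM" resource in resource_init().
 */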

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
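
/*
 * Example (illustrative) of the merging done in arch_mem_init() below,
 * assuming CONFIG_MIPS_CMDLINE_DTB_EXTEND and CONFIG_CMDLINE_BOOL are
 * set but CONFIG_CMDLINE_OVERRIDE is not:
 *
 *	boot_command_line (from DTB)     = "console=ttyS0,115200"
 *	arcs_cmdline (from firmware)     = "mem=256M"
 *	builtin_cmdline (CONFIG_CMDLINE) = "debug"
 *
 *	-> boot_command_line = "console=ttyS0,115200 mem=256M debug"
 */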

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are opposite for initdata, since if initdata crosses
	 * over into another memory section you don't want that section
	 * to be freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();
	paging_init();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about CMA-reserved memblock sections */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}

static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif