xref: /openbmc/linux/arch/mips/kernel/setup.c (revision 7587eb18)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

static void *detect_magic __initdata = detect_memory_region;

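/**
 * add_memory_region() - record a physical memory range in boot_mem_map.
 * @start: physical start address of the range
 * @size: size of the range in bytes
 * @type: BOOT_MEM_RAM, BOOT_MEM_INIT_RAM, BOOT_MEM_ROM_DATA or
 *	  BOOT_MEM_RESERVED
 *
 * Ranges that overlap or touch an existing entry of the same type are
 * merged into it.  A board's plat_mem_setup() would typically call, with
 * illustrative values:
 *
 *	add_memory_region(0x00000000, SZ_128M, BOOT_MEM_RAM);
 */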
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Oops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}

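/*
 * Probe the installed RAM size: detect_magic holds a distinctive value
 * (the address of detect_memory_region), and if the address space wraps
 * around after 'size' bytes, that same value is read back at dm + size.
 * The probe size is doubled from sz_min until either the alias is found
 * or sz_max is reached.  A platform might call this with illustrative
 * bounds:
 *
 *	detect_memory_region(0, SZ_32M, SZ_256M);
 */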
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

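/*
 * "rd_start=" and "rd_size=" describe an initrd that the bootloader has
 * already loaded into memory, e.g. (illustrative values):
 *
 *	rd_start=0x84000000 rd_size=8M
 *
 * Both values go through memparse(), so the usual K/M/G suffixes work.
 */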
static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess whether the bootloader forgot to sign-extend the address */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free PFN after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board-specific code or the command-line parser should have
	 * already set up initrd_start and initrd_end. In that case
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware can't guess
	 * whether it needs to pass 64-bit values when the kernel has been
	 * built purely 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so that the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some configurations (e.g. a big-endian bootloader with a little-endian
 * kernel), the initrd may appear byte-swapped.  Try to detect this and byte
 * swap it if needed.  "070701" is the ASCII magic of a newc-format cpio
 * archive.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

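/*
 * Runs from bootmem_init() once the usable memory range is known:
 * verify that the initrd lies entirely in low memory, byte-swap it if
 * required and reserve its pages so the boot-time allocator will not
 * hand them out.
 */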
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 && !(CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially; we rely on the end of the kernel
	 * code as our memory range starting point. Once bootmem is
	 * initialized we will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is a PFN, not a count of pages. The number of pages
	 * in the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}
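	/*
	 * mapstart is now the first usable PFN above the kernel image
	 * (it is pushed above the initrd as well, further down); the
	 * bootmem bitmap will be placed there.
	 */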

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);
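	/*
	 * The bootmem bitmap (one bit per page between min_low_pfn and
	 * max_low_pfn) now sits at mapstart; bootmap_size is its length
	 * in bytes and is reserved below, once the free ranges have been
	 * registered.
	 */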

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			continue;
		}

		/*
		 * The start of usable memory was rounded up and the end
		 * rounded down above, so only fully-backed pages remain.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel,
		 * but is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 || (CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get by without any kind of memory allocator.  To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;

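/*
 * "mem=size[@start]" replaces the firmware-supplied memory map with a
 * user-defined one, e.g. (illustrative values):
 *
 *	mem=64M@0x10000000
 *
 * The first "mem=" option clears the existing map; each occurrence then
 * adds one RAM range (start defaults to 0 if "@start" is omitted).
 */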
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If the user specifies a memory size, blow away any automatically
	 * generated size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

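/*
 * Add the range [mem, end) of the given type to boot_mem_map, unless its
 * start already lies inside an existing entry.  Used by arch_mem_init()
 * to make sure the kernel image itself appears in the memory map.
 */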
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

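/*
 * Parse the "crashkernel=size[@offset]" command-line option, e.g.
 * crashkernel=64M@0x04000000 (illustrative values), and record the
 * requested region in crashk_res.
 */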
static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM	\
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
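
/*
 * How the final command line is assembled in arch_mem_init(), as chosen
 * by the Kconfig options above:
 *  - USE_PROM_CMDLINE: take the bootloader (ARCS/firmware) arguments;
 *  - USE_DTB_CMDLINE: keep the DTB bootargs already placed in
 *    boot_command_line, falling back to the bootloader arguments if the
 *    DTB supplied none;
 *  - EXTEND_WITH_PROM: append the bootloader arguments afterwards;
 *  - BUILTIN_EXTEND_WITH_PROM: append the bootloader arguments to the
 *    built-in CONFIG_CMDLINE.
 */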

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps.  The "UP" and
	 * "DOWN" rounding is deliberately reversed for initdata: if
	 * initdata crosses over into another memory section, that
	 * section must not be freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();
	paging_init();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about CMA-reserved memblock regions */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

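/*
 * Publish the boot memory map in /proc/iomem: one "System RAM" or
 * "reserved" resource per boot_mem_map entry (highmem is not
 * represented), with the kernel code/data and any crash kernel region
 * nested inside whichever RAM resource contains them.
 */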
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
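
/*
 * Create /sys/kernel/debug/mips.  Other MIPS code uses mips_debugfs_dir
 * as the parent for its debugfs entries.
 */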
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif