/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == PHYS_ADDR_MAX)
		--size;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
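
/*
 * Hypothetical usage sketch: a board's plat_mem_setup() might register
 * 256 MiB of RAM at physical address 0 with:
 *
 *	add_memory_region(0x00000000, SZ_256M, BOOT_MEM_RAM);
 *
 * Overlapping or adjacent regions of the same type are merged by the
 * loop above, so repeated calls for contiguous banks collapse into a
 * single map entry.
 */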

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
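
/*
 * The loop above relies on partial address decoding: on a board with N
 * bytes of RAM the physical address space wraps, so an access at offset
 * N aliases offset 0, and the first doubling at which detect_magic
 * reappears is the RAM size. An illustrative (hypothetical) call from a
 * board file probing for between 8 MiB and 256 MiB of RAM:
 *
 *	detect_memory_region(PHYS_OFFSET, SZ_8M, SZ_256M);
 */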

static bool __init __maybe_unused memory_region_available(phys_addr_t start,
							  phys_addr_t size)
{
	int i;
	bool in_ram = false, free = true;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		phys_addr_t start_, end_;

		start_ = boot_mem_map.map[i].addr;
		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			if (start >= start_ && start + size <= end_)
				in_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			if ((start >= start_ && start < end_) ||
			    (start < start_ && start + size >= start_))
				free = false;
			break;
		default:
			continue;
		}
	}

	return in_ram && free;
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}
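
/*
 * On a 32-bit kernel with 256 MiB of RAM at physical address 0, the
 * format strings above would produce a line like (illustrative values
 * only):
 *
 *	 memory: 10000000 @ 00000000 (usable)
 */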

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by the bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
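
/*
 * Example (hypothetical addresses): a bootloader that has loaded the
 * ramdisk itself might pass
 *
 *	rd_start=0xffffffff88000000 rd_size=8M
 *
 * on the command line; rd_start records the ramdisk's virtual start
 * address and rd_size adds its length to initrd_end.
 */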

/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board-specific code or the command line parser should have
	 * already set up initrd_start and initrd_end. In that case,
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize the initrd addresses. For example, firmware can't
	 * know whether it needs to pass 64-bit values when the kernel
	 * has been built pure 32-bit. We also need to switch from KSEG0
	 * to XKPHYS addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some conditions (e.g. a big-endian bootloader with a little-endian
 * kernel), the initrd might appear byte-swapped. Try to detect this and
 * byte-swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte-swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}
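
/*
 * "070701" is the 6-byte magic of the "new ASCII" (newc) cpio format
 * used for initramfs archives, which is why an unswapped image can be
 * recognized directly from its first bytes above.
 */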

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 && !(CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	phys_addr_t ramstart = PHYS_ADDR_MAX;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially; rely on the end of the kernel
	 * code as our memory range starting point. Once bootmem is
	 * initialized we will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available
	 * and the lowest used RAM address
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		ramstart = min(ramstart, boot_mem_map.map[i].addr);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET) {
		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
				  BOOT_MEM_RESERVED);
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
	}

	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;
#endif

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				memblock_reserve(boot_mem_map.map[i].addr,
						boot_mem_map.map[i].size);

			continue;
		}

		/*
		 * We round the start address of usable memory up and the
		 * end of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		memory_present(0, start, end);
	}

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel,
		 * but is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 || (CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);

	return 0;
}
early_param("mem", early_parse_mem);
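
/*
 * For example (illustrative values), booting with
 *
 *	mem=64M@0x10000000
 *
 * discards the firmware-provided map and registers a single 64 MiB RAM
 * region starting at physical address 0x10000000; without the "@" part
 * the region starts at 0.
 */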

static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);
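
/*
 * Supported forms, mirroring the parser above (example values):
 *
 *	memmap=32M@0x10000000	add a 32 MiB BOOT_MEM_RAM region
 *	memmap=4M$0x1fc00000	mark a 4 MiB region as BOOT_MEM_RESERVED
 *
 * The ACPI-oriented "memmap=nn#ss" and "memmap=exactmap" x86 variants
 * are rejected on MIPS.
 */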

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	if (!memory_region_available(crash_base, crash_size)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
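
/*
 * Example (illustrative values): booting with
 *
 *	crashkernel=64M@0x02000000
 *
 * asks parse_crashkernel() for a 64 MiB reservation at physical address
 * 0x02000000; the region is checked against the boot memory map before
 * crashk_res is filled in.
 */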

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM	\
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
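
/*
 * Rough precedence of the command line assembly in arch_mem_init()
 * below: CONFIG_CMDLINE_OVERRIDE replaces everything with the built-in
 * CONFIG_CMDLINE; otherwise the DTB-provided boot_command_line may be
 * replaced by the bootloader's arcs_cmdline, then optionally extended
 * with arcs_cmdline and/or the built-in CONFIG_CMDLINE according to the
 * CONFIG_MIPS_CMDLINE_* selection above.
 */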

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel, but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator.  To keep old code from
 *	 breaking, plat_setup was simply renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/*
	 * Initialize boot_command_line to an innocuous but non-empty string in
	 * order to prevent early_init_dt_scan_chosen() from copying
	 * CONFIG_CMDLINE into it without our knowledge. We handle
	 * CONFIG_CMDLINE ourselves below & don't want to duplicate its
	 * content because repeating arguments can be problematic.
	 */
	strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are reversed for initdata: if it crosses over into
	 * another memory section, you don't want that section to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up to
	 * this point it is only possible to reserve physical memory
	 * with memblock_reserve; memblock_alloc* can be used only
	 * after this point.
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		memblock_reserve(crashk_res.start,
				 crashk_res.end - crashk_res.start + 1);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about CMA-reserved memblock sections */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			memblock_reserve(reg->base, reg->size);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif
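
/*
 * Illustrative use (from other MIPS code, not this file): callers hang
 * their debugfs files off this directory, e.g.
 *
 *	debugfs_create_file("fpuemustats", 0444, mips_debugfs_dir,
 *			    NULL, &fops);
 *
 * where the name and &fops stand in for a caller's real file name and
 * file operations.
 */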

#ifdef CONFIG_DMA_MAYBE_COHERENT
/* User-defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware-supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
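
/*
 * Booting with "coherentio" or "nocoherentio" on the command line
 * forces hardware or software DMA cache coherency respectively,
 * overriding the platform default recorded in coherentio above.
 */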
#endif
1045