// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/sparc64/kernel/setup.c
 *
 *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <uapi/linux/mount.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,                      /* orig-video-isVGA */
	16                      /* orig-video-points */
};

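/* Early boot console which hands kernel output straight to the
 * OpenBoot PROM until a real console driver takes over.
 */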
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
	prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		prom_early_console.flags &= ~CON_BOOT;
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
		cheetah_enable_pcache();
		break;

	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}

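/* Scan the boot command line for dash-style switches (handed to
 * process_switch() above) and for the "mem=" memory size override.
 */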
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4))
			cmdline_memory_size = memparse(commands + 4, &commands);

		while (*commands && *commands != ' ')
			commands++;
	}
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

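/* Rewrite each entry in the __cpuid_patch table with the
 * four-instruction sequence that matches the running cpu (Starfire,
 * Cheetah with JBUS or Safari interconnect, or sun4v hypervisor),
 * flushing the I-cache after every word is stored.  Plain Spitfire
 * systems that are not Starfire need no patching.
 */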
static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;

	if (tlb_type == spitfire && !this_is_starfire)
		return;

	is_jbus = 0;
	if (tlb_type != hypervisor) {
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}

	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;

		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}

		*(unsigned int *) (addr +  0) = insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		*(unsigned int *) (addr +  8) = insns[2];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  8));

		*(unsigned int *) (addr + 12) = insns[3];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr + 12));

		p++;
	}
}

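/* Generic helpers that overwrite one- or two-instruction patch sites
 * in place.  Callers pass the linker-generated start/end of a patch
 * table; each store is followed by a wmb() and a "flush" so the
 * I-cache stays coherent with the new instruction.
 */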
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insn;
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		start++;
	}
}

void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}

void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}

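/* On hypervisor (sun4v) systems, apply the generic sun4v patch tables,
 * the M7/M8/SN specific tables, and the fast window control patches on
 * everything newer than Niagara-1, then call sun4v_hvapi_init().
 */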
static void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);

	if (tlb_type != hypervisor)
		return;

	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
					&__sun_m7_1insn_patch_end);
		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
					 &__sun_m7_2insn_patch_end);
		break;
	default:
		break;
	}

	if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
		sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
					&__fast_win_ctrl_1insn_patch_end);
	}

	sun4v_hvapi_init();
}

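/* Rewrite the __popc_3insn_patch and __popc_6insn_patch sites with
 * their "popc"-based replacement sequences.  Only called once the cpu
 * is known to advertise AV_SPARC_POPC (see init_sparc64_elf_hwcap()).
 */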
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;

	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p3++;
	}

	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;

		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p6++;
	}
}

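/* Rewrite each __pause_3insn_patch site with its "pause"-based
 * replacement.  Only called when the cpu advertises AV_SPARC_PAUSE.
 */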
static void __init pause_patch(void)
{
	struct pause_patch_entry *p;

	p = &__pause_3insn_patch;
	while (p < &__pause_3insn_patch_end) {
		unsigned long i, addr = p->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p++;
	}
}

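/* Earliest C entry point on the boot cpu: apply the cpu-type patches
 * above, sanity check the boot cpu id against NR_CPUS, record it in
 * the thread_info, do early timer and PROM reporting setup, then hand
 * off to the generic start_kernel().
 */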
void __init start_early_boot(void)
{
	int cpu;

	check_if_starfire();
	per_cpu_patch();
	sun4v_patch();
	smp_init_cpu_poke();

	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
			    cpu, NR_CPUS);
		prom_halt();
	}
	current_thread_info()->cpu = cpu;

	time_init_early();
	prom_init_report();
	start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",

	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
	"adp",
};

static const char *crypto_hwcaps[] = {
	"aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
	"sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

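/* Emit the "cpucaps" line of /proc/cpuinfo: every set hwcap bit by
 * name, plus the individual crypto capabilities decoded from %asr26
 * when HWCAP_SPARC_CRYPTO is present.
 */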
void cpucap_info(struct seq_file *m)
{
	unsigned long caps = sparc64_elf_hwcap;
	int i, printed = 0;

	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", hwcaps[i]);
			printed++;
		}
	}
	if (caps & HWCAP_SPARC_CRYPTO) {
		unsigned long cfr;

		__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			unsigned long bit = 1UL << i;
			if (cfr & bit) {
				seq_printf(m, "%s%s",
					   printed ? "," : "", crypto_hwcaps[i]);
				printed++;
			}
		}
	}
	seq_putc(m, '\n');
}

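/* Boot-time counterpart of the above: print the detected capabilities
 * to the kernel log as "CPU CAPS: [...]" lines, eight entries per line.
 */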
static void __init report_one_hwcap(int *printed, const char *name)
{
	if ((*printed) == 0)
		printk(KERN_INFO "CPU CAPS: [");
	printk(KERN_CONT "%s%s",
	       (*printed) ? "," : "", name);
	if (++(*printed) == 8) {
		printk(KERN_CONT "]\n");
		*printed = 0;
	}
}

static void __init report_crypto_hwcaps(int *printed)
{
	unsigned long cfr;
	int i;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

	for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (cfr & bit)
			report_one_hwcap(printed, crypto_hwcaps[i]);
	}
}

static void __init report_hwcaps(unsigned long caps)
{
	int i, printed = 0;

	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit))
			report_one_hwcap(&printed, hwcaps[i]);
	}
	if (caps & HWCAP_SPARC_CRYPTO)
		report_crypto_hwcaps(&printed);
	if (printed != 0)
		printk(KERN_CONT "]\n");
}

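/* On sun4v, build a hwcap mask from the 'hwcap-list' property of the
 * first cpu node in the machine description.  Returns 0 when there is
 * no MD, no cpu node or no such property, in which case the caller
 * falls back to per-chip defaults.
 */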
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;

	hp = mdesc_grab();
	if (!hp)
		return 0;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;

	while (len) {
		int i, plen;

		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			if (!strcmp(prop, crypto_hwcaps[i]))
				caps |= HWCAP_SPARC_CRYPTO;
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

out:
	mdesc_release(hp);
	return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_N2;
	}

	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;

			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;

	report_hwcaps(sparc64_elf_hwcap);

	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
		pause_patch();
}

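/* Allocate the per-cpu hard and soft IRQ stacks from memblock, one
 * THREAD_SIZE region each, placed on the cpu's home NUMA node.
 */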
void __init alloc_irqstack_bootmem(void)
{
	unsigned int i, node;

	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
						       THREAD_SIZE, node);
		if (!softirq_stack[i])
			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
			      __func__, THREAD_SIZE, THREAD_SIZE, node);
		hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
						       THREAD_SIZE, node);
		if (!hardirq_stack[i])
			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
			      __func__, THREAD_SIZE, THREAD_SIZE, node);
	}
}

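/* Main architecture setup: fetch and parse the PROM boot arguments,
 * register the early console, initialize the IDPROM, wire up the root
 * device and ramdisk parameters, optionally pick up IP-PNP addresses
 * from the PROM "chosen" node, then bring up paging, the ELF hwcap
 * mask and the IRQ stacks.
 */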
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
		register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		pr_info("ARCH: SUN4V\n");
	else
		pr_info("ARCH: SUN4U\n");

	idprom_init();

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup.  */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
	init_sparc64_elf_hwcap();
	smp_fill_in_cpu_possible_map();
	/*
	 * Once the OF device tree and MDESC have been setup and nr_cpus has
	 * been parsed, we know the list of possible cpus.  Therefore we can
	 * allocate the IRQ stacks.
	 */
	alloc_irqstack_bootmem();
}

extern int stop_a_enabled;

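/* Stop-A / break handling: drop into the PROM command prompt when the
 * user hits the break sequence, unless stop_a_enabled has been cleared
 * via its sysctl.
 */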
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);