1 /*
2  * arch/xtensa/kernel/setup.c
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1995  Linus Torvalds
9  * Copyright (C) 2001 - 2005  Tensilica Inc.
10  * Copyright (C) 2014 - 2016  Cadence Design Systems Inc.
11  *
12  * Chris Zankel	<chris@zankel.net>
13  * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
14  * Kevin Chea
15  * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/init.h>
20 #include <linux/mm.h>
21 #include <linux/proc_fs.h>
22 #include <linux/screen_info.h>
23 #include <linux/bootmem.h>
24 #include <linux/kernel.h>
25 #include <linux/percpu.h>
26 #include <linux/cpu.h>
27 #include <linux/of.h>
28 #include <linux/of_fdt.h>
29 
30 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
31 # include <linux/console.h>
32 #endif
33 
34 #ifdef CONFIG_RTC
35 # include <linux/timex.h>
36 #endif
37 
38 #ifdef CONFIG_PROC_FS
39 # include <linux/seq_file.h>
40 #endif
41 
42 #include <asm/bootparam.h>
43 #include <asm/mmu_context.h>
44 #include <asm/pgtable.h>
45 #include <asm/processor.h>
46 #include <asm/timex.h>
47 #include <asm/platform.h>
48 #include <asm/page.h>
49 #include <asm/setup.h>
50 #include <asm/param.h>
51 #include <asm/traps.h>
52 #include <asm/smp.h>
53 #include <asm/sysmem.h>
54 
55 #include <platform/hardware.h>
56 
57 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
58 struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
59 #endif
60 
61 #ifdef CONFIG_BLK_DEV_FD
62 extern struct fd_ops no_fd_ops;
63 struct fd_ops *fd_ops;
64 #endif
65 
66 extern struct rtc_ops no_rtc_ops;
67 struct rtc_ops *rtc_ops;
68 
69 #ifdef CONFIG_BLK_DEV_INITRD
70 extern unsigned long initrd_start;
71 extern unsigned long initrd_end;
72 int initrd_is_mapped = 0;
73 extern int initrd_below_start_ok;
74 #endif
75 
76 #ifdef CONFIG_OF
77 void *dtb_start = __dtb_start;
78 #endif
79 
80 unsigned char aux_device_present;
81 extern unsigned long loops_per_jiffy;
82 
83 /* Command line specified as configuration option. */
84 
85 static char __initdata command_line[COMMAND_LINE_SIZE];
86 
87 #ifdef CONFIG_CMDLINE_BOOL
88 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
89 #endif
90 
91 /*
92  * Boot parameter parsing.
93  *
94  * The Xtensa port uses a list of variable-sized tags to pass data to
95  * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
96  * to be recognised. The list is terminated with a zero-sized
97  * BP_TAG_LAST tag.
98  */
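/*
 * Rough sketch of the tags handled in this file (field names as used
 * below; the exact definitions are in <asm/bootparam.h>):
 *
 *	BP_TAG_FIRST         marks the start of the list
 *	BP_TAG_MEMORY        payload: struct bp_meminfo (type, start, end)
 *	BP_TAG_INITRD        payload: struct bp_meminfo (initrd extent)
 *	BP_TAG_FDT           payload: physical address of the FDT blob
 *	BP_TAG_COMMAND_LINE  payload: kernel command line string
 *	BP_TAG_LAST          zero-sized terminator
 *
 * Each bp_tag_t header carries an id and a size; tag->size bytes of
 * payload follow the header, and the next tag starts immediately after
 * that payload.
 */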
99 
100 typedef struct tagtable {
101 	u32 tag;
102 	int (*parse)(const bp_tag_t*);
103 } tagtable_t;
104 
105 #define __tagtable(tag, fn) static tagtable_t __tagtable_##fn 		\
106 	__attribute__((used, section(".taglist"))) = { tag, fn }
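
/*
 * Handlers registered with __tagtable() are collected in the .taglist
 * section and walked between __tagtable_begin and __tagtable_end by
 * parse_bootparam() below.
 */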
107 
108 /* parse current tag */
109 
110 static int __init parse_tag_mem(const bp_tag_t *tag)
111 {
112 	struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
113 
114 	if (mi->type != MEMORY_TYPE_CONVENTIONAL)
115 		return -1;
116 
117 	return memblock_add(mi->start, mi->end - mi->start);
118 }
119 
120 __tagtable(BP_TAG_MEMORY, parse_tag_mem);
121 
122 #ifdef CONFIG_BLK_DEV_INITRD
123 
124 static int __init parse_tag_initrd(const bp_tag_t *tag)
125 {
126 	struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
127 
128 	initrd_start = (unsigned long)__va(mi->start);
129 	initrd_end = (unsigned long)__va(mi->end);
130 
131 	return 0;
132 }
133 
134 __tagtable(BP_TAG_INITRD, parse_tag_initrd);
135 
136 #ifdef CONFIG_OF
137 
138 static int __init parse_tag_fdt(const bp_tag_t *tag)
139 {
140 	dtb_start = __va(tag->data[0]);
141 	return 0;
142 }
143 
144 __tagtable(BP_TAG_FDT, parse_tag_fdt);
145 
146 #endif /* CONFIG_OF */
147 
148 #endif /* CONFIG_BLK_DEV_INITRD */
149 
150 static int __init parse_tag_cmdline(const bp_tag_t *tag)
151 {
152 	strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
153 	return 0;
154 }
155 
156 __tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
157 
158 static int __init parse_bootparam(const bp_tag_t *tag)
159 {
160 	extern tagtable_t __tagtable_begin, __tagtable_end;
161 	tagtable_t *t;
162 
163 	/* Boot parameters must start with a BP_TAG_FIRST tag. */
164 
165 	if (tag->id != BP_TAG_FIRST) {
166 		printk(KERN_WARNING "Invalid boot parameters!\n");
167 		return 0;
168 	}
169 
170 	tag = (bp_tag_t *)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
171 
172 	/* Parse all tags. */
173 
174 	while (tag != NULL && tag->id != BP_TAG_LAST) {
175 		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
176 			if (tag->id == t->tag) {
177 				t->parse(tag);
178 				break;
179 			}
180 		}
181 		if (t == &__tagtable_end)
182 			printk(KERN_WARNING "Ignoring tag 0x%08x\n",
183 			       tag->id);
184 		tag = (bp_tag_t *)((unsigned long)(tag + 1) + tag->size);
185 	}
186 
187 	return 0;
188 }
189 
190 #ifdef CONFIG_OF
191 
192 #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
193 unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
194 EXPORT_SYMBOL(xtensa_kio_paddr);
195 
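/*
 * Scan the flat device tree for a top-level "simple-bus" node and use
 * its "ranges" property to derive the KIO physical base
 * (xtensa_kio_paddr), rounded down to a 256MB boundary.
 */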
196 static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
197 		int depth, void *data)
198 {
199 	const __be32 *ranges;
200 	int len;
201 
202 	if (depth > 1)
203 		return 0;
204 
205 	if (!of_flat_dt_is_compatible(node, "simple-bus"))
206 		return 0;
207 
208 	ranges = of_get_flat_dt_prop(node, "ranges", &len);
209 	if (!ranges)
210 		return 1;
211 	if (len == 0)
212 		return 1;
213 
214 	xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
215 	/* round down to nearest 256MB boundary */
216 	xtensa_kio_paddr &= 0xf0000000;
217 
218 	return 1;
219 }
220 #else
221 static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
222 		int depth, void *data)
223 {
224 	return 1;
225 }
226 #endif
227 
228 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
229 {
230 	size &= PAGE_MASK;
231 	memblock_add(base, size);
232 }
233 
234 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
235 {
236 	return __alloc_bootmem(size, align, 0);
237 }
238 
239 void __init early_init_devtree(void *params)
240 {
241 	early_init_dt_scan(params);
242 	of_scan_flat_dt(xtensa_dt_io_area, NULL);
243 
244 	if (!command_line[0])
245 		strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
246 }
247 
248 #endif /* CONFIG_OF */
249 
250 /*
251  * Initialize architecture. (Early stage)
252  */
253 
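/*
 * Command line precedence here: a BP_TAG_COMMAND_LINE boot tag wins,
 * then boot_command_line as filled from the device tree in
 * early_init_devtree(), then the built-in CONFIG_CMDLINE default.
 */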
254 void __init init_arch(bp_tag_t *bp_start)
255 {
256 	/* Parse boot parameters */
257 
258 	if (bp_start)
259 		parse_bootparam(bp_start);
260 
261 #ifdef CONFIG_OF
262 	early_init_devtree(dtb_start);
263 #endif
264 
265 #ifdef CONFIG_CMDLINE_BOOL
266 	if (!command_line[0])
267 		strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
268 #endif
269 
270 	/* Early hook for platforms */
271 
272 	platform_init(bp_start);
273 
274 	/* Initialize MMU. */
275 
276 	init_mmu();
277 }
278 
279 /*
280  * Initialize system. Setup memory and reserve regions.
281  */
282 
283 extern char _end;
284 extern char _stext;
285 extern char _WindowVectors_text_start;
286 extern char _WindowVectors_text_end;
287 extern char _DebugInterruptVector_literal_start;
288 extern char _DebugInterruptVector_text_end;
289 extern char _KernelExceptionVector_literal_start;
290 extern char _KernelExceptionVector_text_end;
291 extern char _UserExceptionVector_literal_start;
292 extern char _UserExceptionVector_text_end;
293 extern char _DoubleExceptionVector_literal_start;
294 extern char _DoubleExceptionVector_text_end;
295 #if XCHAL_EXCM_LEVEL >= 2
296 extern char _Level2InterruptVector_text_start;
297 extern char _Level2InterruptVector_text_end;
298 #endif
299 #if XCHAL_EXCM_LEVEL >= 3
300 extern char _Level3InterruptVector_text_start;
301 extern char _Level3InterruptVector_text_end;
302 #endif
303 #if XCHAL_EXCM_LEVEL >= 4
304 extern char _Level4InterruptVector_text_start;
305 extern char _Level4InterruptVector_text_end;
306 #endif
307 #if XCHAL_EXCM_LEVEL >= 5
308 extern char _Level5InterruptVector_text_start;
309 extern char _Level5InterruptVector_text_end;
310 #endif
311 #if XCHAL_EXCM_LEVEL >= 6
312 extern char _Level6InterruptVector_text_start;
313 extern char _Level6InterruptVector_text_end;
314 #endif
315 #ifdef CONFIG_SMP
316 extern char _SecondaryResetVector_text_start;
317 extern char _SecondaryResetVector_text_end;
318 #endif
319 
320 
321 #ifdef CONFIG_S32C1I_SELFTEST
322 #if XCHAL_HAVE_S32C1I
323 
324 static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
325 
326 /*
327  * Basic atomic compare-and-swap that records the PC of the S32C1I for probing.
328  *
329  * If *v == cmp, set *v = set.  Return previous *v.
330  */
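/*
 * Roughly, ignoring the PC bookkeeping, this behaves like the
 * following C sketch:
 *
 *	int old = *v;
 *	if (old == cmp)
 *		*v = set;
 *	return old;
 */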
331 static inline int probed_compare_swap(int *v, int cmp, int set)
332 {
333 	int tmp;
334 
335 	__asm__ __volatile__(
336 			"	movi	%1, 1f\n"
337 			"	s32i	%1, %4, 0\n"
338 			"	wsr	%2, scompare1\n"
339 			"1:	s32c1i	%0, %3, 0\n"
340 			: "=a" (set), "=&a" (tmp)
341 			: "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
342 			: "memory"
343 			);
344 	return set;
345 }
346 
347 /* Handle probed exception */
348 
349 static void __init do_probed_exception(struct pt_regs *regs,
350 		unsigned long exccause)
351 {
352 	if (regs->pc == rcw_probe_pc) {	/* exception on s32c1i ? */
353 		regs->pc += 3;		/* skip the s32c1i instruction */
354 		rcw_exc = exccause;
355 	} else {
356 		do_unhandled(regs, exccause);
357 	}
358 }
359 
360 /* Simple test of S32C1I (SoC bring-up assist) */
361 
362 static int __init check_s32c1i(void)
363 {
364 	int n, cause1, cause2;
365 	void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
366 
367 	rcw_probe_pc = 0;
368 	handbus  = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
369 			do_probed_exception);
370 	handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
371 			do_probed_exception);
372 	handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
373 			do_probed_exception);
374 
375 	/* First try an S32C1I that does not store: */
376 	rcw_exc = 0;
377 	rcw_word = 1;
378 	n = probed_compare_swap(&rcw_word, 0, 2);
379 	cause1 = rcw_exc;
380 
381 	/* took exception? */
382 	if (cause1 != 0) {
383 		/* unclean exception? */
384 		if (n != 2 || rcw_word != 1)
385 			panic("S32C1I exception error");
386 	} else if (rcw_word != 1 || n != 1) {
387 		panic("S32C1I compare error");
388 	}
389 
390 	/* Then an S32C1I that stores: */
391 	rcw_exc = 0;
392 	rcw_word = 0x1234567;
393 	n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
394 	cause2 = rcw_exc;
395 
396 	if (cause2 != 0) {
397 		/* unclean exception? */
398 		if (n != 0xabcde || rcw_word != 0x1234567)
399 			panic("S32C1I exception error (b)");
400 	} else if (rcw_word != 0xabcde || n != 0x1234567) {
401 		panic("S32C1I store error");
402 	}
403 
404 	/* Verify consistency of exceptions: */
405 	if (cause1 || cause2) {
406 		pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
407 		/* If emulation of S32C1I upon bus error gets implemented,
408 		   we can get rid of this panic for single core (not SMP) */
409 		panic("S32C1I exceptions not currently supported");
410 	}
411 	if (cause1 != cause2)
412 		panic("inconsistent S32C1I exceptions");
413 
414 	trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
415 	trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
416 	trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
417 	return 0;
418 }
419 
420 #else /* XCHAL_HAVE_S32C1I */
421 
422 /* This condition should not occur with a commercially deployed processor.
423    Display a reminder for early engineering test or demo chips / FPGA bitstreams. */
424 static int __init check_s32c1i(void)
425 {
426 	pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
427 	return 0;
428 }
429 
430 #endif /* XCHAL_HAVE_S32C1I */
431 early_initcall(check_s32c1i);
432 #endif /* CONFIG_S32C1I_SELFTEST */
433 
434 static inline int mem_reserve(unsigned long start, unsigned long end)
435 {
436 	return memblock_reserve(start, end - start);
437 }
438 
439 void __init setup_arch(char **cmdline_p)
440 {
441 	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
442 	*cmdline_p = command_line;
443 
444 	/* Reserve memory regions: the initrd, the kernel image and the exception/interrupt vector code. */
445 
446 #ifdef CONFIG_BLK_DEV_INITRD
447 	if (initrd_start < initrd_end) {
448 		initrd_is_mapped = mem_reserve(__pa(initrd_start),
449 					       __pa(initrd_end)) == 0;
450 		initrd_below_start_ok = 1;
451 	} else {
452 		initrd_start = 0;
453 	}
454 #endif
455 
456 	mem_reserve(__pa(&_stext), __pa(&_end));
457 
458 	mem_reserve(__pa(&_WindowVectors_text_start),
459 		    __pa(&_WindowVectors_text_end));
460 
461 	mem_reserve(__pa(&_DebugInterruptVector_literal_start),
462 		    __pa(&_DebugInterruptVector_text_end));
463 
464 	mem_reserve(__pa(&_KernelExceptionVector_literal_start),
465 		    __pa(&_KernelExceptionVector_text_end));
466 
467 	mem_reserve(__pa(&_UserExceptionVector_literal_start),
468 		    __pa(&_UserExceptionVector_text_end));
469 
470 	mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
471 		    __pa(&_DoubleExceptionVector_text_end));
472 
473 #if XCHAL_EXCM_LEVEL >= 2
474 	mem_reserve(__pa(&_Level2InterruptVector_text_start),
475 		    __pa(&_Level2InterruptVector_text_end));
476 #endif
477 #if XCHAL_EXCM_LEVEL >= 3
478 	mem_reserve(__pa(&_Level3InterruptVector_text_start),
479 		    __pa(&_Level3InterruptVector_text_end));
480 #endif
481 #if XCHAL_EXCM_LEVEL >= 4
482 	mem_reserve(__pa(&_Level4InterruptVector_text_start),
483 		    __pa(&_Level4InterruptVector_text_end));
484 #endif
485 #if XCHAL_EXCM_LEVEL >= 5
486 	mem_reserve(__pa(&_Level5InterruptVector_text_start),
487 		    __pa(&_Level5InterruptVector_text_end));
488 #endif
489 #if XCHAL_EXCM_LEVEL >= 6
490 	mem_reserve(__pa(&_Level6InterruptVector_text_start),
491 		    __pa(&_Level6InterruptVector_text_end));
492 #endif
493 
494 #ifdef CONFIG_SMP
495 	mem_reserve(__pa(&_SecondaryResetVector_text_start),
496 		    __pa(&_SecondaryResetVector_text_end));
497 #endif
498 	parse_early_param();
499 	bootmem_init();
500 
501 	unflatten_and_copy_device_tree();
502 
503 	platform_setup(cmdline_p);
504 
505 #ifdef CONFIG_SMP
506 	smp_init_cpus();
507 #endif
508 
509 	paging_init();
510 	zones_init();
511 
512 #ifdef CONFIG_VT
513 # if defined(CONFIG_VGA_CONSOLE)
514 	conswitchp = &vga_con;
515 # elif defined(CONFIG_DUMMY_CONSOLE)
516 	conswitchp = &dummy_con;
517 # endif
518 #endif
519 
520 #ifdef CONFIG_PCI
521 	platform_pcibios_init();
522 #endif
523 }
524 
525 static DEFINE_PER_CPU(struct cpu, cpu_data);
526 
527 static int __init topology_init(void)
528 {
529 	int i;
530 
531 	for_each_possible_cpu(i) {
532 		struct cpu *cpu = &per_cpu(cpu_data, i);
533 		cpu->hotpluggable = !!i;
534 		register_cpu(cpu, i);
535 	}
536 
537 	return 0;
538 }
539 subsys_initcall(topology_init);
540 
541 void cpu_reset(void)
542 {
543 #if XCHAL_HAVE_PTP_MMU
544 	local_irq_disable();
545 	/*
546 	 * We have a full MMU: all autoload ways (DTLB ways 7, 8 and 9)
547 	 * must be flushed.
548 	 * Way 4 is not currently used by Linux.
549 	 * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
550 	 * Way 5 shall be flushed and way 6 shall be set to identity mapping
551 	 * on MMUv3.
552 	 */
553 	local_flush_tlb_all();
554 	invalidate_page_directory();
555 #if XCHAL_HAVE_SPANNING_WAY
556 	/* MMU v3 */
557 	{
558 		unsigned long vaddr = (unsigned long)cpu_reset;
559 		unsigned long paddr = __pa(vaddr);
560 		unsigned long tmpaddr = vaddr + SZ_512M;
561 		unsigned long tmp0, tmp1, tmp2, tmp3;
562 
563 		/*
564 		 * Find a place for the temporary mapping. It must not be
565 		 * in the same 512MB region as vaddr or paddr, otherwise
566 		 * there may be a multihit exception either on entry to the
567 		 * temporary mapping or on entry to the identity mapping.
568 		 * (512MB is the biggest page size supported by the TLB.)
569 		 */
570 		while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
571 			tmpaddr += SZ_512M;
572 
573 		/* Invalidate mapping in the selected temporary area */
574 		if (itlb_probe(tmpaddr) & 0x8)
575 			invalidate_itlb_entry(itlb_probe(tmpaddr));
576 		if (itlb_probe(tmpaddr + PAGE_SIZE) & 0x8)
577 			invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
578 
579 		/*
580 		 * Map two consecutive pages starting at the physical address
581 		 * of this function to the temporary mapping area.
582 		 */
583 		write_itlb_entry(__pte((paddr & PAGE_MASK) |
584 				       _PAGE_HW_VALID |
585 				       _PAGE_HW_EXEC |
586 				       _PAGE_CA_BYPASS),
587 				 tmpaddr & PAGE_MASK);
588 		write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
589 				       _PAGE_HW_VALID |
590 				       _PAGE_HW_EXEC |
591 				       _PAGE_CA_BYPASS),
592 				 (tmpaddr & PAGE_MASK) + PAGE_SIZE);
593 
594 		/* Reinitialize TLB */
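		/*
		 * Operand map for the asm below, as listed in its
		 * constraints: %0-%3 are scratch (tmp0-tmp3),
		 * %4 = tmpaddr - vaddr, %5 = paddr - vaddr, %6 = SZ_128M,
		 * %7 = SZ_512M, %8 = PAGE_SIZE,
		 * %9 = (tmpaddr + SZ_512M) & PAGE_MASK.
		 */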
595 		__asm__ __volatile__ ("movi	%0, 1f\n\t"
596 				      "movi	%3, 2f\n\t"
597 				      "add	%0, %0, %4\n\t"
598 				      "add	%3, %3, %5\n\t"
599 				      "jx	%0\n"
600 				      /*
601 				       * No literal, data or stack access
602 				       * below this point
603 				       */
604 				      "1:\n\t"
605 				      /* Initialize *tlbcfg */
606 				      "movi	%0, 0\n\t"
607 				      "wsr	%0, itlbcfg\n\t"
608 				      "wsr	%0, dtlbcfg\n\t"
609 				      /* Invalidate TLB way 5 */
610 				      "movi	%0, 4\n\t"
611 				      "movi	%1, 5\n"
612 				      "1:\n\t"
613 				      "iitlb	%1\n\t"
614 				      "idtlb	%1\n\t"
615 				      "add	%1, %1, %6\n\t"
616 				      "addi	%0, %0, -1\n\t"
617 				      "bnez	%0, 1b\n\t"
618 				      /* Initialize TLB way 6 */
619 				      "movi	%0, 7\n\t"
620 				      "addi	%1, %9, 3\n\t"
621 				      "addi	%2, %9, 6\n"
622 				      "1:\n\t"
623 				      "witlb	%1, %2\n\t"
624 				      "wdtlb	%1, %2\n\t"
625 				      "add	%1, %1, %7\n\t"
626 				      "add	%2, %2, %7\n\t"
627 				      "addi	%0, %0, -1\n\t"
628 				      "bnez	%0, 1b\n\t"
629 				      /* Jump to identity mapping */
630 				      "jx	%3\n"
631 				      "2:\n\t"
632 				      /* Complete way 6 initialization */
633 				      "witlb	%1, %2\n\t"
634 				      "wdtlb	%1, %2\n\t"
635 				      /* Invalidate temporary mapping */
636 				      "sub	%0, %9, %7\n\t"
637 				      "iitlb	%0\n\t"
638 				      "add	%0, %0, %8\n\t"
639 				      "iitlb	%0"
640 				      : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
641 					"=&a"(tmp3)
642 				      : "a"(tmpaddr - vaddr),
643 					"a"(paddr - vaddr),
644 					"a"(SZ_128M), "a"(SZ_512M),
645 					"a"(PAGE_SIZE),
646 					"a"((tmpaddr + SZ_512M) & PAGE_MASK)
647 				      : "memory");
648 	}
649 #endif
650 #endif
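	/*
	 * Quiesce debug/loop state (icount, icountlevel, ibreakenable and
	 * lcount where configured), set PS, then jump to the reset vector.
	 */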
651 	__asm__ __volatile__ ("movi	a2, 0\n\t"
652 			      "wsr	a2, icountlevel\n\t"
653 			      "movi	a2, 0\n\t"
654 			      "wsr	a2, icount\n\t"
655 #if XCHAL_NUM_IBREAK > 0
656 			      "wsr	a2, ibreakenable\n\t"
657 #endif
658 #if XCHAL_HAVE_LOOPS
659 			      "wsr	a2, lcount\n\t"
660 #endif
661 			      "movi	a2, 0x1f\n\t"
662 			      "wsr	a2, ps\n\t"
663 			      "isync\n\t"
664 			      "jx	%0\n\t"
665 			      :
666 			      : "a" (XCHAL_RESET_VECTOR_VADDR)
667 			      : "a2");
668 	for (;;)
669 		;
670 }
671 
672 void machine_restart(char * cmd)
673 {
674 	platform_restart();
675 }
676 
677 void machine_halt(void)
678 {
679 	platform_halt();
680 	while (1);
681 }
682 
683 void machine_power_off(void)
684 {
685 	platform_power_off();
686 	while (1);
687 }
688 #ifdef CONFIG_PROC_FS
689 
690 /*
691  * Display some core information through /proc/cpuinfo.
692  */
693 
694 static int
695 c_show(struct seq_file *f, void *slot)
696 {
697 	/* high-level stuff */
698 	seq_printf(f, "CPU count\t: %u\n"
699 		      "CPU list\t: %*pbl\n"
700 		      "vendor_id\t: Tensilica\n"
701 		      "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
702 		      "core ID\t\t: " XCHAL_CORE_ID "\n"
703 		      "build ID\t: 0x%x\n"
704 		      "byte order\t: %s\n"
705 		      "cpu MHz\t\t: %lu.%02lu\n"
706 		      "bogomips\t: %lu.%02lu\n",
707 		      num_online_cpus(),
708 		      cpumask_pr_args(cpu_online_mask),
709 		      XCHAL_BUILD_UNIQUE_ID,
710 		      XCHAL_HAVE_BE ?  "big" : "little",
711 		      ccount_freq/1000000,
712 		      (ccount_freq/10000) % 100,
713 		      loops_per_jiffy/(500000/HZ),
714 		      (loops_per_jiffy/(5000/HZ)) % 100);
715 
716 	seq_printf(f, "flags\t\t: "
717 #if XCHAL_HAVE_NMI
718 		     "nmi "
719 #endif
720 #if XCHAL_HAVE_DEBUG
721 		     "debug "
722 # if XCHAL_HAVE_OCD
723 		     "ocd "
724 # endif
725 #endif
726 #if XCHAL_HAVE_DENSITY
727 		     "density "
728 #endif
729 #if XCHAL_HAVE_BOOLEANS
730 		     "boolean "
731 #endif
732 #if XCHAL_HAVE_LOOPS
733 		     "loop "
734 #endif
735 #if XCHAL_HAVE_NSA
736 		     "nsa "
737 #endif
738 #if XCHAL_HAVE_MINMAX
739 		     "minmax "
740 #endif
741 #if XCHAL_HAVE_SEXT
742 		     "sext "
743 #endif
744 #if XCHAL_HAVE_CLAMPS
745 		     "clamps "
746 #endif
747 #if XCHAL_HAVE_MAC16
748 		     "mac16 "
749 #endif
750 #if XCHAL_HAVE_MUL16
751 		     "mul16 "
752 #endif
753 #if XCHAL_HAVE_MUL32
754 		     "mul32 "
755 #endif
756 #if XCHAL_HAVE_MUL32_HIGH
757 		     "mul32h "
758 #endif
759 #if XCHAL_HAVE_FP
760 		     "fpu "
761 #endif
762 #if XCHAL_HAVE_S32C1I
763 		     "s32c1i "
764 #endif
765 		     "\n");
766 
767 	/* Registers. */
768 	seq_printf(f, "physical aregs\t: %d\n"
769 		     "misc regs\t: %d\n"
770 		     "ibreak\t\t: %d\n"
771 		     "dbreak\t\t: %d\n",
772 		     XCHAL_NUM_AREGS,
773 		     XCHAL_NUM_MISC_REGS,
774 		     XCHAL_NUM_IBREAK,
775 		     XCHAL_NUM_DBREAK);
776 
777 
778 	/* Interrupt. */
779 	seq_printf(f, "num ints\t: %d\n"
780 		     "ext ints\t: %d\n"
781 		     "int levels\t: %d\n"
782 		     "timers\t\t: %d\n"
783 		     "debug level\t: %d\n",
784 		     XCHAL_NUM_INTERRUPTS,
785 		     XCHAL_NUM_EXTINTERRUPTS,
786 		     XCHAL_NUM_INTLEVELS,
787 		     XCHAL_NUM_TIMERS,
788 		     XCHAL_DEBUGLEVEL);
789 
790 	/* Cache */
791 	seq_printf(f, "icache line size: %d\n"
792 		     "icache ways\t: %d\n"
793 		     "icache size\t: %d\n"
794 		     "icache flags\t: "
795 #if XCHAL_ICACHE_LINE_LOCKABLE
796 		     "lock "
797 #endif
798 		     "\n"
799 		     "dcache line size: %d\n"
800 		     "dcache ways\t: %d\n"
801 		     "dcache size\t: %d\n"
802 		     "dcache flags\t: "
803 #if XCHAL_DCACHE_IS_WRITEBACK
804 		     "writeback "
805 #endif
806 #if XCHAL_DCACHE_LINE_LOCKABLE
807 		     "lock "
808 #endif
809 		     "\n",
810 		     XCHAL_ICACHE_LINESIZE,
811 		     XCHAL_ICACHE_WAYS,
812 		     XCHAL_ICACHE_SIZE,
813 		     XCHAL_DCACHE_LINESIZE,
814 		     XCHAL_DCACHE_WAYS,
815 		     XCHAL_DCACHE_SIZE);
816 
817 	return 0;
818 }
819 
820 /*
821  * We show only CPU #0 info.
822  */
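/*
 * c_start() returns a dummy non-NULL cookie for position 0 so that
 * c_show() runs exactly once; c_next() then terminates the iteration.
 */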
823 static void *
824 c_start(struct seq_file *f, loff_t *pos)
825 {
826 	return (*pos == 0) ? (void *)1 : NULL;
827 }
828 
829 static void *
830 c_next(struct seq_file *f, void *v, loff_t *pos)
831 {
832 	return NULL;
833 }
834 
835 static void
836 c_stop(struct seq_file *f, void *v)
837 {
838 }
839 
840 const struct seq_operations cpuinfo_op =
841 {
842 	.start	= c_start,
843 	.next	= c_next,
844 	.stop	= c_stop,
845 	.show	= c_show,
846 };
847 
848 #endif /* CONFIG_PROC_FS */
849