xref: /openbmc/linux/arch/powerpc/kernel/misc_64.S (revision 3f639ee8c52c187d8c95db430ac6f485bffbe5af)
19994a338SPaul Mackerras/*
29994a338SPaul Mackerras * This file contains miscellaneous low-level functions.
39994a338SPaul Mackerras *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
49994a338SPaul Mackerras *
59994a338SPaul Mackerras * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
69994a338SPaul Mackerras * and Paul Mackerras.
79994a338SPaul Mackerras * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
89994a338SPaul Mackerras * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
99994a338SPaul Mackerras *
109994a338SPaul Mackerras * This program is free software; you can redistribute it and/or
119994a338SPaul Mackerras * modify it under the terms of the GNU General Public License
129994a338SPaul Mackerras * as published by the Free Software Foundation; either version
139994a338SPaul Mackerras * 2 of the License, or (at your option) any later version.
149994a338SPaul Mackerras *
159994a338SPaul Mackerras */
169994a338SPaul Mackerras
179994a338SPaul Mackerras#include <linux/sys.h>
189994a338SPaul Mackerras#include <asm/unistd.h>
199994a338SPaul Mackerras#include <asm/errno.h>
209994a338SPaul Mackerras#include <asm/processor.h>
219994a338SPaul Mackerras#include <asm/page.h>
229994a338SPaul Mackerras#include <asm/cache.h>
239994a338SPaul Mackerras#include <asm/ppc_asm.h>
249994a338SPaul Mackerras#include <asm/asm-offsets.h>
259994a338SPaul Mackerras#include <asm/cputable.h>
266cb7bfebSDavid Gibson#include <asm/thread_info.h>
279994a338SPaul Mackerras
289994a338SPaul Mackerras	.text
299994a338SPaul Mackerras
309994a338SPaul Mackerras_GLOBAL(get_msr)
319994a338SPaul Mackerras	mfmsr	r3
329994a338SPaul Mackerras	blr
339994a338SPaul Mackerras
349994a338SPaul Mackerras_GLOBAL(get_srr0)
359994a338SPaul Mackerras	mfsrr0  r3
369994a338SPaul Mackerras	blr
379994a338SPaul Mackerras
389994a338SPaul Mackerras_GLOBAL(get_srr1)
399994a338SPaul Mackerras	mfsrr1  r3
409994a338SPaul Mackerras	blr
419994a338SPaul Mackerras
429994a338SPaul Mackerras#ifdef CONFIG_IRQSTACKS
439994a338SPaul Mackerras_GLOBAL(call_do_softirq)
449994a338SPaul Mackerras	mflr	r0
459994a338SPaul Mackerras	std	r0,16(r1)
469994a338SPaul Mackerras	stdu	r1,THREAD_SIZE-112(r3)
479994a338SPaul Mackerras	mr	r1,r3
489994a338SPaul Mackerras	bl	.__do_softirq
499994a338SPaul Mackerras	ld	r1,0(r1)
509994a338SPaul Mackerras	ld	r0,16(r1)
519994a338SPaul Mackerras	mtlr	r0
529994a338SPaul Mackerras	blr
539994a338SPaul Mackerras
54b9e5b4e6SBenjamin Herrenschmidt_GLOBAL(call_handle_irq)
55b9e5b4e6SBenjamin Herrenschmidt	ld	r8,0(r7)
569994a338SPaul Mackerras	mflr	r0
579994a338SPaul Mackerras	std	r0,16(r1)
58b9e5b4e6SBenjamin Herrenschmidt	mtctr	r8
59b9e5b4e6SBenjamin Herrenschmidt	stdu	r1,THREAD_SIZE-112(r6)
60b9e5b4e6SBenjamin Herrenschmidt	mr	r1,r6
61b9e5b4e6SBenjamin Herrenschmidt	bctrl
629994a338SPaul Mackerras	ld	r1,0(r1)
639994a338SPaul Mackerras	ld	r0,16(r1)
649994a338SPaul Mackerras	mtlr	r0
659994a338SPaul Mackerras	blr
669994a338SPaul Mackerras#endif /* CONFIG_IRQSTACKS */
679994a338SPaul Mackerras
689994a338SPaul Mackerras	.section	".toc","aw"
699994a338SPaul MackerrasPPC64_CACHES:
709994a338SPaul Mackerras	.tc		ppc64_caches[TC],ppc64_caches
719994a338SPaul Mackerras	.section	".text"
729994a338SPaul Mackerras
739994a338SPaul Mackerras/*
749994a338SPaul Mackerras * Write any modified data cache blocks out to memory
759994a338SPaul Mackerras * and invalidate the corresponding instruction cache blocks.
769994a338SPaul Mackerras *
779994a338SPaul Mackerras * flush_icache_range(unsigned long start, unsigned long stop)
789994a338SPaul Mackerras *
799994a338SPaul Mackerras *   flush all bytes from start through stop-1 inclusive
809994a338SPaul Mackerras */
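/*
 * For orientation, a rough C-level sketch of what the routine below does
 * (dline_size/iline_size are illustrative stand-ins for the ppc64_caches
 * line sizes loaded from the TOC; a sketch, not the implementation):
 *
 *	extern unsigned long dline_size, iline_size;
 *
 *	void flush_icache_range_sketch(unsigned long start, unsigned long stop)
 *	{
 *		unsigned long p;
 *
 *		for (p = start & ~(dline_size - 1); p < stop; p += dline_size)
 *			asm volatile("dcbst 0,%0" : : "r" (p) : "memory");
 *		asm volatile("sync");
 *		for (p = start & ~(iline_size - 1); p < stop; p += iline_size)
 *			asm volatile("icbi 0,%0" : : "r" (p) : "memory");
 *		asm volatile("isync");
 *	}
 */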
819994a338SPaul Mackerras
829994a338SPaul Mackerras_KPROBE(__flush_icache_range)
839994a338SPaul Mackerras
849994a338SPaul Mackerras/*
859994a338SPaul Mackerras * Flush the data cache to memory
869994a338SPaul Mackerras *
879994a338SPaul Mackerras * Different systems have different cache line sizes
889994a338SPaul Mackerras * and in some cases i-cache and d-cache line sizes differ from
899994a338SPaul Mackerras * each other.
909994a338SPaul Mackerras */
919994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
929994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
939994a338SPaul Mackerras	addi	r5,r7,-1
949994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
959994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
969994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
979994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
989994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
999994a338SPaul Mackerras	beqlr				/* nothing to do? */
1009994a338SPaul Mackerras	mtctr	r8
1019994a338SPaul Mackerras1:	dcbst	0,r6
1029994a338SPaul Mackerras	add	r6,r6,r7
1039994a338SPaul Mackerras	bdnz	1b
1049994a338SPaul Mackerras	sync
1059994a338SPaul Mackerras
1069994a338SPaul Mackerras/* Now invalidate the instruction cache */
1079994a338SPaul Mackerras
1089994a338SPaul Mackerras	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
1099994a338SPaul Mackerras	addi	r5,r7,-1
1109994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1119994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1129994a338SPaul Mackerras	add	r8,r8,r5
1139994a338SPaul Mackerras	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
1149994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1159994a338SPaul Mackerras	beqlr				/* nothing to do? */
1169994a338SPaul Mackerras	mtctr	r8
1179994a338SPaul Mackerras2:	icbi	0,r6
1189994a338SPaul Mackerras	add	r6,r6,r7
1199994a338SPaul Mackerras	bdnz	2b
1209994a338SPaul Mackerras	isync
1219994a338SPaul Mackerras	blr
1229994a338SPaul Mackerras	.previous .text
1239994a338SPaul Mackerras/*
1249994a338SPaul Mackerras * Like above, but only do the D-cache.
1259994a338SPaul Mackerras *
1269994a338SPaul Mackerras * flush_dcache_range(unsigned long start, unsigned long stop)
1279994a338SPaul Mackerras *
1289994a338SPaul Mackerras *    flush all bytes from start to stop-1 inclusive
1299994a338SPaul Mackerras */
1309994a338SPaul Mackerras_GLOBAL(flush_dcache_range)
1319994a338SPaul Mackerras
1329994a338SPaul Mackerras/*
1339994a338SPaul Mackerras * Flush the data cache to memory
1349994a338SPaul Mackerras *
1359994a338SPaul Mackerras * Different systems have different cache line sizes
1369994a338SPaul Mackerras */
1379994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
1389994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
1399994a338SPaul Mackerras	addi	r5,r7,-1
1409994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1419994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1429994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
1439994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
1449994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1459994a338SPaul Mackerras	beqlr				/* nothing to do? */
1469994a338SPaul Mackerras	mtctr	r8
1479994a338SPaul Mackerras0:	dcbst	0,r6
1489994a338SPaul Mackerras	add	r6,r6,r7
1499994a338SPaul Mackerras	bdnz	0b
1509994a338SPaul Mackerras	sync
1519994a338SPaul Mackerras	blr
1529994a338SPaul Mackerras
1539994a338SPaul Mackerras/*
1549994a338SPaul Mackerras * Like above, but works on non-mapped physical addresses.
1559994a338SPaul Mackerras * Use only for non-LPAR setups! It also assumes real mode
1569994a338SPaul Mackerras * is cacheable. Used for flushing out the DART before using
1579994a338SPaul Mackerras * it as uncacheable memory
1589994a338SPaul Mackerras *
1599994a338SPaul Mackerras * flush_dcache_phys_range(unsigned long start, unsigned long stop)
1609994a338SPaul Mackerras *
1619994a338SPaul Mackerras *    flush all bytes from start to stop-1 inclusive
1629994a338SPaul Mackerras */
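/*
 * The only difference from flush_dcache_range above is that MSR[DR] is
 * cleared around the loop so the addresses are used untranslated.  A
 * minimal sketch of that part (illustrative only):
 *
 *	unsigned long msr = mfmsr();
 *
 *	asm volatile("sync; mtmsrd %0; sync; isync" : : "r" (msr & ~MSR_DR));
 *	// ... dcbst each cache line of [start, stop), then sync, as above ...
 *	asm volatile("sync; isync; mtmsrd %0; sync; isync" : : "r" (msr));
 */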
1639994a338SPaul Mackerras_GLOBAL(flush_dcache_phys_range)
1649994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
1659994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
1669994a338SPaul Mackerras	addi	r5,r7,-1
1679994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1689994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1699994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
1709994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
1719994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1729994a338SPaul Mackerras	beqlr				/* nothing to do? */
1739994a338SPaul Mackerras	mfmsr	r5			/* Disable MMU Data Relocation */
1749994a338SPaul Mackerras	ori	r0,r5,MSR_DR
1759994a338SPaul Mackerras	xori	r0,r0,MSR_DR
1769994a338SPaul Mackerras	sync
1779994a338SPaul Mackerras	mtmsr	r0
1789994a338SPaul Mackerras	sync
1799994a338SPaul Mackerras	isync
1809994a338SPaul Mackerras	mtctr	r8
1819994a338SPaul Mackerras0:	dcbst	0,r6
1829994a338SPaul Mackerras	add	r6,r6,r7
1839994a338SPaul Mackerras	bdnz	0b
1849994a338SPaul Mackerras	sync
1859994a338SPaul Mackerras	isync
1869994a338SPaul Mackerras	mtmsr	r5			/* Re-enable MMU Data Relocation */
1879994a338SPaul Mackerras	sync
1889994a338SPaul Mackerras	isync
1899994a338SPaul Mackerras	blr
1909994a338SPaul Mackerras
1919994a338SPaul Mackerras_GLOBAL(flush_inval_dcache_range)
1929994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
1939994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
1949994a338SPaul Mackerras	addi	r5,r7,-1
1959994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1969994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1979994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
1989994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
1999994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
2009994a338SPaul Mackerras	beqlr				/* nothing to do? */
2019994a338SPaul Mackerras	sync
2029994a338SPaul Mackerras	isync
2039994a338SPaul Mackerras	mtctr	r8
2049994a338SPaul Mackerras0:	dcbf	0,r6
2059994a338SPaul Mackerras	add	r6,r6,r7
2069994a338SPaul Mackerras	bdnz	0b
2079994a338SPaul Mackerras	sync
2089994a338SPaul Mackerras	isync
2099994a338SPaul Mackerras	blr
2109994a338SPaul Mackerras
2119994a338SPaul Mackerras
2129994a338SPaul Mackerras/*
2139994a338SPaul Mackerras * Flush a particular page from the data cache to RAM.
2149994a338SPaul Mackerras * Note: this is necessary because the instruction cache does *not*
2159994a338SPaul Mackerras * snoop from the data cache.
2169994a338SPaul Mackerras *
2179994a338SPaul Mackerras *	void __flush_dcache_icache(void *page)
2189994a338SPaul Mackerras */
2199994a338SPaul Mackerras_GLOBAL(__flush_dcache_icache)
2209994a338SPaul Mackerras/*
2219994a338SPaul Mackerras * Flush the data cache to memory
2229994a338SPaul Mackerras *
2239994a338SPaul Mackerras * Different systems have different cache line sizes
2249994a338SPaul Mackerras */
2259994a338SPaul Mackerras
2269994a338SPaul Mackerras/* Flush the dcache */
2279994a338SPaul Mackerras 	ld	r7,PPC64_CACHES@toc(r2)
2289994a338SPaul Mackerras	clrrdi	r3,r3,PAGE_SHIFT           	    /* Page align */
2299994a338SPaul Mackerras	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
2309994a338SPaul Mackerras	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
2319994a338SPaul Mackerras	mr	r6,r3
2329994a338SPaul Mackerras	mtctr	r4
2339994a338SPaul Mackerras0:	dcbst	0,r6
2349994a338SPaul Mackerras	add	r6,r6,r5
2359994a338SPaul Mackerras	bdnz	0b
2369994a338SPaul Mackerras	sync
2379994a338SPaul Mackerras
2389994a338SPaul Mackerras/* Now invalidate the icache */
2399994a338SPaul Mackerras
2409994a338SPaul Mackerras	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
2419994a338SPaul Mackerras	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
2429994a338SPaul Mackerras	mtctr	r4
2439994a338SPaul Mackerras1:	icbi	0,r3
2449994a338SPaul Mackerras	add	r3,r3,r5
2459994a338SPaul Mackerras	bdnz	1b
2469994a338SPaul Mackerras	isync
2479994a338SPaul Mackerras	blr
2489994a338SPaul Mackerras
2499994a338SPaul Mackerras/*
2509994a338SPaul Mackerras * identify_cpu: identifies the CPU from the PVR and calls its setup_cpu function
2519994a338SPaul Mackerras * In:	r3 = base of the cpu_specs array
2529994a338SPaul Mackerras *	r4 = address of cur_cpu_spec
2539994a338SPaul Mackerras *	r5 = relocation offset
2549994a338SPaul Mackerras */
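/*
 * Approximately, in C (pvr_mask/pvr_value/cpu_setup are the struct cpu_spec
 * fields; the table is assumed to end with a catch-all entry, since the
 * loop below has no explicit bound, and every pointer is adjusted by the
 * relocation offset in r5):
 *
 *	struct cpu_spec *s = cpu_specs;		// r3
 *	unsigned int pvr = mfspr(SPRN_PVR);
 *
 *	while ((pvr & s->pvr_mask) != s->pvr_value)
 *		s++;
 *	cur_cpu_spec = s;			// stored with the offset removed
 *	if (s->cpu_setup)
 *		s->cpu_setup(offset, s);	// r3 = offset, r4 = cur_cpu_spec
 */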
2559994a338SPaul Mackerras_GLOBAL(identify_cpu)
2569994a338SPaul Mackerras	mfpvr	r7
2579994a338SPaul Mackerras1:
2589994a338SPaul Mackerras	lwz	r8,CPU_SPEC_PVR_MASK(r3)
2599994a338SPaul Mackerras	and	r8,r8,r7
2609994a338SPaul Mackerras	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
2619994a338SPaul Mackerras	cmplw	0,r9,r8
2629994a338SPaul Mackerras	beq	1f
2639994a338SPaul Mackerras	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
2649994a338SPaul Mackerras	b	1b
2659994a338SPaul Mackerras1:
2669994a338SPaul Mackerras	sub	r0,r3,r5
2679994a338SPaul Mackerras	std	r0,0(r4)
2689994a338SPaul Mackerras	ld	r4,CPU_SPEC_SETUP(r3)
269b26f100dSGeoff Levand	cmpdi	0,r4,0
2709994a338SPaul Mackerras	add	r4,r4,r5
271b26f100dSGeoff Levand	beqlr
2729994a338SPaul Mackerras	ld	r4,0(r4)
2739994a338SPaul Mackerras	add	r4,r4,r5
2749994a338SPaul Mackerras	mtctr	r4
2759994a338SPaul Mackerras	/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
2769994a338SPaul Mackerras	mr	r4,r3
2779994a338SPaul Mackerras	mr	r3,r5
2789994a338SPaul Mackerras	bctr
2799994a338SPaul Mackerras
2809994a338SPaul Mackerras/*
2819994a338SPaul Mackerras * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
2829994a338SPaul Mackerras * and writes nops over sections of code that don't apply for this cpu.
2839994a338SPaul Mackerras * r3 = data offset (not changed)
2849994a338SPaul Mackerras */
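/*
 * Each fixup table entry is 32 bytes: a feature mask, the value the masked
 * feature bits must have, and the begin/end addresses of the alternative
 * code section.  Roughly (the struct name is illustrative; the real layout
 * comes from the __ftr_fixup linker section, and every pointer is adjusted
 * by the data offset in r3):
 *
 *	struct ftr_fixup { unsigned long mask, value; u32 *start, *end; };
 *	struct ftr_fixup *f;
 *	u32 *insn;
 *
 *	for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++) {
 *		if ((cur_cpu_spec->cpu_features & f->mask) == f->value)
 *			continue;		// features match, keep the code
 *		for (insn = f->start; insn < f->end; insn++)
 *			*insn = 0x60000000;	// "nop"
 *		// flush the patched lines if the I- and D-caches are split
 *	}
 */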
2859994a338SPaul Mackerras_GLOBAL(do_cpu_ftr_fixups)
2869994a338SPaul Mackerras	/* Get CPU 0 features */
287e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
2889994a338SPaul Mackerras	sub	r6,r6,r3
2899994a338SPaul Mackerras	ld	r4,0(r6)
2909994a338SPaul Mackerras	sub	r4,r4,r3
2919994a338SPaul Mackerras	ld	r4,CPU_SPEC_FEATURES(r4)
2929994a338SPaul Mackerras	/* Get the fixup table */
293e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
2949994a338SPaul Mackerras	sub	r6,r6,r3
295e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
2969994a338SPaul Mackerras	sub	r7,r7,r3
2979994a338SPaul Mackerras	/* Do the fixup */
2989994a338SPaul Mackerras1:	cmpld	r6,r7
2999994a338SPaul Mackerras	bgelr
3009994a338SPaul Mackerras	addi	r6,r6,32
3019994a338SPaul Mackerras	ld	r8,-32(r6)	/* mask */
3029994a338SPaul Mackerras	and	r8,r8,r4
3039994a338SPaul Mackerras	ld	r9,-24(r6)	/* value */
3049994a338SPaul Mackerras	cmpld	r8,r9
3059994a338SPaul Mackerras	beq	1b
3069994a338SPaul Mackerras	ld	r8,-16(r6)	/* section begin */
3079994a338SPaul Mackerras	ld	r9,-8(r6)	/* section end */
3089994a338SPaul Mackerras	subf.	r9,r8,r9
3099994a338SPaul Mackerras	beq	1b
3109994a338SPaul Mackerras	/* write nops over the section of code */
3119994a338SPaul Mackerras	/* todo: if large section, add a branch at the start of it */
3129994a338SPaul Mackerras	srwi	r9,r9,2
3139994a338SPaul Mackerras	mtctr	r9
3149994a338SPaul Mackerras	sub	r8,r8,r3
3159994a338SPaul Mackerras	lis	r0,0x60000000@h	/* nop */
3169994a338SPaul Mackerras3:	stw	r0,0(r8)
3179994a338SPaul Mackerras	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
3189994a338SPaul Mackerras	beq	2f
3199994a338SPaul Mackerras	dcbst	0,r8		/* suboptimal, but simpler */
3209994a338SPaul Mackerras	sync
3219994a338SPaul Mackerras	icbi	0,r8
3229994a338SPaul Mackerras2:	addi	r8,r8,4
3239994a338SPaul Mackerras	bdnz	3b
3249994a338SPaul Mackerras	sync			/* additional sync needed on g4 */
3259994a338SPaul Mackerras	isync
3269994a338SPaul Mackerras	b	1b
3279994a338SPaul Mackerras
328*3f639ee8SStephen Rothwell/*
329*3f639ee8SStephen Rothwell * do_fw_ftr_fixups - goes through the list of firmware feature fixups
330*3f639ee8SStephen Rothwell * and writes nops over sections of code that don't apply for this firmware.
331*3f639ee8SStephen Rothwell * r3 = data offset (not changed)
332*3f639ee8SStephen Rothwell */
333*3f639ee8SStephen Rothwell_GLOBAL(do_fw_ftr_fixups)
334*3f639ee8SStephen Rothwell	/* Get firmware features */
335*3f639ee8SStephen Rothwell	LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
336*3f639ee8SStephen Rothwell	sub	r6,r6,r3
337*3f639ee8SStephen Rothwell	ld	r4,0(r6)
338*3f639ee8SStephen Rothwell	/* Get the fixup table */
339*3f639ee8SStephen Rothwell	LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
340*3f639ee8SStephen Rothwell	sub	r6,r6,r3
341*3f639ee8SStephen Rothwell	LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
342*3f639ee8SStephen Rothwell	sub	r7,r7,r3
343*3f639ee8SStephen Rothwell	/* Do the fixup */
344*3f639ee8SStephen Rothwell1:	cmpld	r6,r7
345*3f639ee8SStephen Rothwell	bgelr
346*3f639ee8SStephen Rothwell	addi	r6,r6,32
347*3f639ee8SStephen Rothwell	ld	r8,-32(r6)	/* mask */
348*3f639ee8SStephen Rothwell	and	r8,r8,r4
349*3f639ee8SStephen Rothwell	ld	r9,-24(r6)	/* value */
350*3f639ee8SStephen Rothwell	cmpld	r8,r9
351*3f639ee8SStephen Rothwell	beq	1b
352*3f639ee8SStephen Rothwell	ld	r8,-16(r6)	/* section begin */
353*3f639ee8SStephen Rothwell	ld	r9,-8(r6)	/* section end */
354*3f639ee8SStephen Rothwell	subf.	r9,r8,r9
355*3f639ee8SStephen Rothwell	beq	1b
356*3f639ee8SStephen Rothwell	/* write nops over the section of code */
357*3f639ee8SStephen Rothwell	/* todo: if large section, add a branch at the start of it */
358*3f639ee8SStephen Rothwell	srwi	r9,r9,2
359*3f639ee8SStephen Rothwell	mtctr	r9
360*3f639ee8SStephen Rothwell	sub	r8,r8,r3
361*3f639ee8SStephen Rothwell	lis	r0,0x60000000@h	/* nop */
362*3f639ee8SStephen Rothwell3:	stw	r0,0(r8)
363*3f639ee8SStephen RothwellBEGIN_FTR_SECTION
364*3f639ee8SStephen Rothwell	dcbst	0,r8		/* suboptimal, but simpler */
365*3f639ee8SStephen Rothwell	sync
366*3f639ee8SStephen Rothwell	icbi	0,r8
367*3f639ee8SStephen RothwellEND_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
368*3f639ee8SStephen Rothwell	addi	r8,r8,4
369*3f639ee8SStephen Rothwell	bdnz	3b
370*3f639ee8SStephen Rothwell	sync			/* additional sync needed on g4 */
371*3f639ee8SStephen Rothwell	isync
372*3f639ee8SStephen Rothwell	b	1b
373*3f639ee8SStephen Rothwell
3749994a338SPaul Mackerras#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
3759994a338SPaul Mackerras/*
3769994a338SPaul Mackerras * Do an IO access in real mode
3779994a338SPaul Mackerras */
3789994a338SPaul Mackerras_GLOBAL(real_readb)
3799994a338SPaul Mackerras	mfmsr	r7
3809994a338SPaul Mackerras	ori	r0,r7,MSR_DR
3819994a338SPaul Mackerras	xori	r0,r0,MSR_DR
3829994a338SPaul Mackerras	sync
3839994a338SPaul Mackerras	mtmsrd	r0
3849994a338SPaul Mackerras	sync
3859994a338SPaul Mackerras	isync
3869994a338SPaul Mackerras	mfspr	r6,SPRN_HID4
3879994a338SPaul Mackerras	rldicl	r5,r6,32,0
3889994a338SPaul Mackerras	ori	r5,r5,0x100
3899994a338SPaul Mackerras	rldicl	r5,r5,32,0
3909994a338SPaul Mackerras	sync
3919994a338SPaul Mackerras	mtspr	SPRN_HID4,r5
3929994a338SPaul Mackerras	isync
3939994a338SPaul Mackerras	slbia
3949994a338SPaul Mackerras	isync
3959994a338SPaul Mackerras	lbz	r3,0(r3)
3969994a338SPaul Mackerras	sync
3979994a338SPaul Mackerras	mtspr	SPRN_HID4,r6
3989994a338SPaul Mackerras	isync
3999994a338SPaul Mackerras	slbia
4009994a338SPaul Mackerras	isync
4019994a338SPaul Mackerras	mtmsrd	r7
4029994a338SPaul Mackerras	sync
4039994a338SPaul Mackerras	isync
4049994a338SPaul Mackerras	blr
4059994a338SPaul Mackerras
4069994a338SPaul Mackerras	/*
4079994a338SPaul Mackerras * Do an IO access in real mode
4089994a338SPaul Mackerras */
4099994a338SPaul Mackerras_GLOBAL(real_writeb)
4109994a338SPaul Mackerras	mfmsr	r7
4119994a338SPaul Mackerras	ori	r0,r7,MSR_DR
4129994a338SPaul Mackerras	xori	r0,r0,MSR_DR
4139994a338SPaul Mackerras	sync
4149994a338SPaul Mackerras	mtmsrd	r0
4159994a338SPaul Mackerras	sync
4169994a338SPaul Mackerras	isync
4179994a338SPaul Mackerras	mfspr	r6,SPRN_HID4
4189994a338SPaul Mackerras	rldicl	r5,r6,32,0
4199994a338SPaul Mackerras	ori	r5,r5,0x100
4209994a338SPaul Mackerras	rldicl	r5,r5,32,0
4219994a338SPaul Mackerras	sync
4229994a338SPaul Mackerras	mtspr	SPRN_HID4,r5
4239994a338SPaul Mackerras	isync
4249994a338SPaul Mackerras	slbia
4259994a338SPaul Mackerras	isync
4269994a338SPaul Mackerras	stb	r3,0(r4)
4279994a338SPaul Mackerras	sync
4289994a338SPaul Mackerras	mtspr	SPRN_HID4,r6
4299994a338SPaul Mackerras	isync
4309994a338SPaul Mackerras	slbia
4319994a338SPaul Mackerras	isync
4329994a338SPaul Mackerras	mtmsrd	r7
4339994a338SPaul Mackerras	sync
4349994a338SPaul Mackerras	isync
4359994a338SPaul Mackerras	blr
4369994a338SPaul Mackerras#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
4379994a338SPaul Mackerras
438127efeb2SStephen Rothwell#ifdef CONFIG_CPU_FREQ_PMAC64
4399994a338SPaul Mackerras/*
4404350147aSBenjamin Herrenschmidt * SCOM access functions for 970 (FX only for now)
4414350147aSBenjamin Herrenschmidt *
4424350147aSBenjamin Herrenschmidt * unsigned long scom970_read(unsigned int address);
4434350147aSBenjamin Herrenschmidt * void scom970_write(unsigned int address, unsigned long value);
4444350147aSBenjamin Herrenschmidt *
4454350147aSBenjamin Herrenschmidt * The address passed in is the 24-bit register address. This code
4464350147aSBenjamin Herrenschmidt * is 970 specific and will not check the status bits, so you should
4474350147aSBenjamin Herrenschmidt * know what you are doing.
4484350147aSBenjamin Herrenschmidt */
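/*
 * Both routines form the SCOM command word the same way; in C terms
 * (illustrative, using the 32-bit view of the registers):
 *
 *	u32 cmd = (addr << 8) & 0xffff0000;	// drop the low (parity) byte
 *
 *	read:	SCOMC = cmd | 0x8000;  data = SCOMD;  (void)SCOMC;  // 0x8000 = RW
 *	write:	SCOMD = value;         SCOMC = cmd;   (void)SCOMC;
 */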
4494350147aSBenjamin Herrenschmidt_GLOBAL(scom970_read)
4504350147aSBenjamin Herrenschmidt	/* interrupts off */
4514350147aSBenjamin Herrenschmidt	mfmsr	r4
4524350147aSBenjamin Herrenschmidt	ori	r0,r4,MSR_EE
4534350147aSBenjamin Herrenschmidt	xori	r0,r0,MSR_EE
4544350147aSBenjamin Herrenschmidt	mtmsrd	r0,1
4554350147aSBenjamin Herrenschmidt
4564350147aSBenjamin Herrenschmidt	/* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
4574350147aSBenjamin Herrenschmidt	 * (including parity). On current CPUs they must be 0'd,
4584350147aSBenjamin Herrenschmidt	 * and finally OR in the RW bit
4594350147aSBenjamin Herrenschmidt	 */
4604350147aSBenjamin Herrenschmidt	rlwinm	r3,r3,8,0,15
4614350147aSBenjamin Herrenschmidt	ori	r3,r3,0x8000
4624350147aSBenjamin Herrenschmidt
4634350147aSBenjamin Herrenschmidt	/* do the actual scom read */
4644350147aSBenjamin Herrenschmidt	sync
4654350147aSBenjamin Herrenschmidt	mtspr	SPRN_SCOMC,r3
4664350147aSBenjamin Herrenschmidt	isync
4674350147aSBenjamin Herrenschmidt	mfspr	r3,SPRN_SCOMD
4684350147aSBenjamin Herrenschmidt	isync
4694350147aSBenjamin Herrenschmidt	mfspr	r0,SPRN_SCOMC
4704350147aSBenjamin Herrenschmidt	isync
4714350147aSBenjamin Herrenschmidt
4724350147aSBenjamin Herrenschmidt	/* XXX:	fixup result on some buggy 970's (ouch ! we lost a bit, bah
4734350147aSBenjamin Herrenschmidt	 * that's the best we can do). Not implemented yet as we don't use
4744350147aSBenjamin Herrenschmidt	 * the scom on any of the bogus CPUs yet, but may have to be done
4754350147aSBenjamin Herrenschmidt	 * ultimately
4764350147aSBenjamin Herrenschmidt	 */
4774350147aSBenjamin Herrenschmidt
4784350147aSBenjamin Herrenschmidt	/* restore interrupts */
4794350147aSBenjamin Herrenschmidt	mtmsrd	r4,1
4804350147aSBenjamin Herrenschmidt	blr
4814350147aSBenjamin Herrenschmidt
4824350147aSBenjamin Herrenschmidt
4834350147aSBenjamin Herrenschmidt_GLOBAL(scom970_write)
4844350147aSBenjamin Herrenschmidt	/* interrupts off */
4854350147aSBenjamin Herrenschmidt	mfmsr	r5
4864350147aSBenjamin Herrenschmidt	ori	r0,r5,MSR_EE
4874350147aSBenjamin Herrenschmidt	xori	r0,r0,MSR_EE
4884350147aSBenjamin Herrenschmidt	mtmsrd	r0,1
4894350147aSBenjamin Herrenschmidt
4904350147aSBenjamin Herrenschmidt	/* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
4914350147aSBenjamin Herrenschmidt	 * (including parity). On current CPUs they must be 0'd.
4924350147aSBenjamin Herrenschmidt	 */
4934350147aSBenjamin Herrenschmidt
4944350147aSBenjamin Herrenschmidt	rlwinm	r3,r3,8,0,15
4954350147aSBenjamin Herrenschmidt
4964350147aSBenjamin Herrenschmidt	sync
4974350147aSBenjamin Herrenschmidt	mtspr	SPRN_SCOMD,r4      /* write data */
4984350147aSBenjamin Herrenschmidt	isync
4994350147aSBenjamin Herrenschmidt	mtspr	SPRN_SCOMC,r3      /* write command */
5004350147aSBenjamin Herrenschmidt	isync
5014350147aSBenjamin Herrenschmidt	mfspr	r3,SPRN_SCOMC
5024350147aSBenjamin Herrenschmidt	isync
5034350147aSBenjamin Herrenschmidt
5044350147aSBenjamin Herrenschmidt	/* restore interrupts */
5054350147aSBenjamin Herrenschmidt	mtmsrd	r5,1
5064350147aSBenjamin Herrenschmidt	blr
507127efeb2SStephen Rothwell#endif /* CONFIG_CPU_FREQ_PMAC64 */
5084350147aSBenjamin Herrenschmidt
5094350147aSBenjamin Herrenschmidt
5104350147aSBenjamin Herrenschmidt/*
5119994a338SPaul Mackerras * Create a kernel thread
5129994a338SPaul Mackerras *   kernel_thread(fn, arg, flags)
5139994a338SPaul Mackerras */
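/*
 * Semantically this is roughly the following (illustrative pseudo-C;
 * CLONE()/EXIT() stand for the raw sc instructions with r0 = __NR_clone
 * and r0 = __NR_exit; the child also reloads r2 from fn's function
 * descriptor, which is why this lives in assembly):
 *
 *	long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *	{
 *		long rc = CLONE(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *
 *		if (rc != 0)
 *			return rc;	// parent: new pid (or error)
 *		fn(arg);		// child: run fn on the shared mm
 *		EXIT(0);		// then exit
 *	}
 */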
5149994a338SPaul Mackerras_GLOBAL(kernel_thread)
5159994a338SPaul Mackerras	std	r29,-24(r1)
5169994a338SPaul Mackerras	std	r30,-16(r1)
5179994a338SPaul Mackerras	stdu	r1,-STACK_FRAME_OVERHEAD(r1)
5189994a338SPaul Mackerras	mr	r29,r3
5199994a338SPaul Mackerras	mr	r30,r4
5209994a338SPaul Mackerras	ori	r3,r5,CLONE_VM	/* flags */
5219994a338SPaul Mackerras	oris	r3,r3,(CLONE_UNTRACED>>16)
5229994a338SPaul Mackerras	li	r4,0		/* new sp (unused) */
5239994a338SPaul Mackerras	li	r0,__NR_clone
5249994a338SPaul Mackerras	sc
5259994a338SPaul Mackerras	cmpdi	0,r3,0		/* parent or child? */
5269994a338SPaul Mackerras	bne	1f		/* return if parent */
5279994a338SPaul Mackerras	li	r0,0
5289994a338SPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
5299994a338SPaul Mackerras	ld	r2,8(r29)
5309994a338SPaul Mackerras	ld	r29,0(r29)
5319994a338SPaul Mackerras	mtlr	r29              /* fn addr in lr */
5329994a338SPaul Mackerras	mr	r3,r30	        /* load arg and call fn */
5339994a338SPaul Mackerras	blrl
5349994a338SPaul Mackerras	li	r0,__NR_exit	/* exit after child exits */
5359994a338SPaul Mackerras        li	r3,0
5369994a338SPaul Mackerras	sc
5379994a338SPaul Mackerras1:	addi	r1,r1,STACK_FRAME_OVERHEAD
5389994a338SPaul Mackerras	ld	r29,-24(r1)
5399994a338SPaul Mackerras	ld	r30,-16(r1)
5409994a338SPaul Mackerras	blr
5419994a338SPaul Mackerras
5429994a338SPaul Mackerras/*
5439994a338SPaul Mackerras * disable_kernel_fp()
5449994a338SPaul Mackerras * Disable the FPU.
5459994a338SPaul Mackerras */
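/*
 * Pseudo-C equivalent of the rldicl pair below:
 *
 *	mtmsr(mfmsr() & ~MSR_FP);
 */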
5469994a338SPaul Mackerras_GLOBAL(disable_kernel_fp)
5479994a338SPaul Mackerras	mfmsr	r3
5489994a338SPaul Mackerras	rldicl	r0,r3,(63-MSR_FP_LG),1
5499994a338SPaul Mackerras	rldicl	r3,r0,(MSR_FP_LG+1),0
5509994a338SPaul Mackerras	mtmsrd	r3			/* disable use of fpu now */
5519994a338SPaul Mackerras	isync
5529994a338SPaul Mackerras	blr
5539994a338SPaul Mackerras
5549994a338SPaul Mackerras#ifdef CONFIG_ALTIVEC
5559994a338SPaul Mackerras
5569994a338SPaul Mackerras#if 0 /* this has no callers for now */
5579994a338SPaul Mackerras/*
5589994a338SPaul Mackerras * disable_kernel_altivec()
5599994a338SPaul Mackerras * Disable the VMX.
5609994a338SPaul Mackerras */
5619994a338SPaul Mackerras_GLOBAL(disable_kernel_altivec)
5629994a338SPaul Mackerras	mfmsr	r3
5639994a338SPaul Mackerras	rldicl	r0,r3,(63-MSR_VEC_LG),1
5649994a338SPaul Mackerras	rldicl	r3,r0,(MSR_VEC_LG+1),0
5659994a338SPaul Mackerras	mtmsrd	r3			/* disable use of VMX now */
5669994a338SPaul Mackerras	isync
5679994a338SPaul Mackerras	blr
5689994a338SPaul Mackerras#endif /* 0 */
5699994a338SPaul Mackerras
5709994a338SPaul Mackerras/*
5719994a338SPaul Mackerras * giveup_altivec(tsk)
5729994a338SPaul Mackerras * Disable VMX for the task given as the argument,
5739994a338SPaul Mackerras * and save the vector registers in its thread_struct.
5749994a338SPaul Mackerras * Enables the VMX for use in the kernel on return.
5759994a338SPaul Mackerras */
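/*
 * In outline (pseudo-C; the VRs and VSCR are saved with SAVE_32VRS/stvx,
 * and MSR[VEC] is first set so the kernel may touch them):
 *
 *	if (!tsk)
 *		return;
 *	save tsk->thread.vr[0..31] and tsk->thread.vscr;
 *	if (tsk->thread.regs)
 *		tsk->thread.regs->msr &= ~MSR_VEC;	// previous owner loses VMX
 *	last_task_used_altivec = NULL;			// !CONFIG_SMP only
 */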
5769994a338SPaul Mackerras_GLOBAL(giveup_altivec)
5779994a338SPaul Mackerras	mfmsr	r5
5789994a338SPaul Mackerras	oris	r5,r5,MSR_VEC@h
5799994a338SPaul Mackerras	mtmsrd	r5			/* enable use of VMX now */
5809994a338SPaul Mackerras	isync
5819994a338SPaul Mackerras	cmpdi	0,r3,0
5829994a338SPaul Mackerras	beqlr-				/* if no previous owner, done */
5839994a338SPaul Mackerras	addi	r3,r3,THREAD		/* want THREAD of task */
5849994a338SPaul Mackerras	ld	r5,PT_REGS(r3)
5859994a338SPaul Mackerras	cmpdi	0,r5,0
5869994a338SPaul Mackerras	SAVE_32VRS(0,r4,r3)
5879994a338SPaul Mackerras	mfvscr	vr0
5889994a338SPaul Mackerras	li	r4,THREAD_VSCR
5899994a338SPaul Mackerras	stvx	vr0,r4,r3
5909994a338SPaul Mackerras	beq	1f
5919994a338SPaul Mackerras	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
5929994a338SPaul Mackerras	lis	r3,MSR_VEC@h
5939994a338SPaul Mackerras	andc	r4,r4,r3		/* disable VMX for previous task */
5949994a338SPaul Mackerras	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
5959994a338SPaul Mackerras1:
5969994a338SPaul Mackerras#ifndef CONFIG_SMP
5979994a338SPaul Mackerras	li	r5,0
5989994a338SPaul Mackerras	ld	r4,last_task_used_altivec@got(r2)
5999994a338SPaul Mackerras	std	r5,0(r4)
6009994a338SPaul Mackerras#endif /* CONFIG_SMP */
6019994a338SPaul Mackerras	blr
6029994a338SPaul Mackerras
6039994a338SPaul Mackerras#endif /* CONFIG_ALTIVEC */
6049994a338SPaul Mackerras
6059994a338SPaul Mackerras_GLOBAL(execve)
6069994a338SPaul Mackerras	li	r0,__NR_execve
6079994a338SPaul Mackerras	sc
6089994a338SPaul Mackerras	bnslr
6099994a338SPaul Mackerras	neg	r3,r3
6109994a338SPaul Mackerras	blr
6119994a338SPaul Mackerras
6129994a338SPaul Mackerras/* kexec_wait(phys_cpu)
6139994a338SPaul Mackerras *
6149994a338SPaul Mackerras * wait for the flag to change, indicating this kernel is going away but
6159994a338SPaul Mackerras * the slave code for the next one is at addresses 0 to 0x100.
6169994a338SPaul Mackerras *
6179994a338SPaul Mackerras * This is used by all slaves.
6189994a338SPaul Mackerras *
6199994a338SPaul Mackerras * Physical (hardware) cpu id should be in r3.
6209994a338SPaul Mackerras */
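/*
 * Pseudo-C outline (the branch taken is an absolute "ba"-style jump, not a
 * call, and r3 still holds the physical cpu id at that point):
 *
 *	for (;;) {
 *		HMT_low();
 *		if (kexec_flag != 0)
 *			goto *(void *)0x60;	// slave entry of the new kernel
 *	}
 */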
6219994a338SPaul Mackerras_GLOBAL(kexec_wait)
6229994a338SPaul Mackerras	bl	1f
6239994a338SPaul Mackerras1:	mflr	r5
6249994a338SPaul Mackerras	addi	r5,r5,kexec_flag-1b
6259994a338SPaul Mackerras
6269994a338SPaul Mackerras99:	HMT_LOW
6279994a338SPaul Mackerras#ifdef CONFIG_KEXEC		/* use no memory without kexec */
6289994a338SPaul Mackerras	lwz	r4,0(r5)
6299994a338SPaul Mackerras	cmpwi	0,r4,0
6309994a338SPaul Mackerras	bnea	0x60
6319994a338SPaul Mackerras#endif
6329994a338SPaul Mackerras	b	99b
6339994a338SPaul Mackerras
6349994a338SPaul Mackerras/* this can be in text because we won't change it until we are
6359994a338SPaul Mackerras * running in real mode anyway
6369994a338SPaul Mackerras */
6379994a338SPaul Mackerraskexec_flag:
6389994a338SPaul Mackerras	.long	0
6399994a338SPaul Mackerras
6409994a338SPaul Mackerras
6419994a338SPaul Mackerras#ifdef CONFIG_KEXEC
6429994a338SPaul Mackerras
6439994a338SPaul Mackerras/* kexec_smp_wait(void)
6449994a338SPaul Mackerras *
6459994a338SPaul Mackerras * call with interrupts off
6469994a338SPaul Mackerras * note: this is a terminal routine, it does not save lr
6479994a338SPaul Mackerras *
6489994a338SPaul Mackerras * get phys id from paca
6499994a338SPaul Mackerras * set paca id to -1 to say we got here
6509994a338SPaul Mackerras * switch to real mode
6519994a338SPaul Mackerras * join other cpus in kexec_wait(phys_id)
6529994a338SPaul Mackerras */
6539994a338SPaul Mackerras_GLOBAL(kexec_smp_wait)
6549994a338SPaul Mackerras	lhz	r3,PACAHWCPUID(r13)
6559994a338SPaul Mackerras	li	r4,-1
6569994a338SPaul Mackerras	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
6579994a338SPaul Mackerras	bl	real_mode
6589994a338SPaul Mackerras	b	.kexec_wait
6599994a338SPaul Mackerras
6609994a338SPaul Mackerras/*
6619994a338SPaul Mackerras * switch to real mode (turn mmu off)
6629994a338SPaul Mackerras * we use the early kernel trick that the hardware ignores bits
6639994a338SPaul Mackerras * 0 and 1 (big endian) of the effective address in real mode
6649994a338SPaul Mackerras *
6659994a338SPaul Mackerras * don't overwrite r3 here, it is live for kexec_wait above.
6669994a338SPaul Mackerras */
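/*
 * i.e., roughly (pseudo-C): build the target state in SRR0/SRR1 and rfid
 * back to the caller with translation off:
 *
 *	msr  = mfmsr();
 *	MSR  = msr & ~MSR_RI;			// not recoverable across the switch
 *	SRR1 = msr & ~(MSR_IR | MSR_DR);	// translation off
 *	SRR0 = return address (LR);
 *	rfid;
 */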
6679994a338SPaul Mackerrasreal_mode:	/* assume normal blr return */
6689994a338SPaul Mackerras1:	li	r9,MSR_RI
6699994a338SPaul Mackerras	li	r10,MSR_DR|MSR_IR
6709994a338SPaul Mackerras	mflr	r11		/* return address to SRR0 */
6719994a338SPaul Mackerras	mfmsr	r12
6729994a338SPaul Mackerras	andc	r9,r12,r9
6739994a338SPaul Mackerras	andc	r10,r12,r10
6749994a338SPaul Mackerras
6759994a338SPaul Mackerras	mtmsrd	r9,1
6769994a338SPaul Mackerras	mtspr	SPRN_SRR1,r10
6779994a338SPaul Mackerras	mtspr	SPRN_SRR0,r11
6789994a338SPaul Mackerras	rfid
6799994a338SPaul Mackerras
6809994a338SPaul Mackerras
6819994a338SPaul Mackerras/*
6829994a338SPaul Mackerras * kexec_sequence(newstack, start, image, control, clear_all())
6839994a338SPaul Mackerras *
6849994a338SPaul Mackerras * does the grungy work with stack switching and real mode switches
6859994a338SPaul Mackerras * also does simple calls to other code
6869994a338SPaul Mackerras */
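/*
 * The C-side call is roughly (as used by the powerpc kexec code; treat the
 * exact argument expressions as illustrative):
 *
 *	kexec_sequence(&kexec_stack, image->start, image,
 *		       page_address(image->control_code_page),
 *		       ppc_md.hpte_clear_all);
 *
 * Outline: switch to the new stack, disable interrupts, copy and flush the
 * destination pages, drop to real mode, clear the hash table and TLB, copy
 * the first 0x100 bytes of the new image to address 0, release the other
 * cpus via kexec_flag, then jump to image->start.
 */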
6879994a338SPaul Mackerras
6889994a338SPaul Mackerras_GLOBAL(kexec_sequence)
6899994a338SPaul Mackerras	mflr	r0
6909994a338SPaul Mackerras	std	r0,16(r1)
6919994a338SPaul Mackerras
6929994a338SPaul Mackerras	/* switch stacks to newstack -- &kexec_stack.stack */
6939994a338SPaul Mackerras	stdu	r1,THREAD_SIZE-112(r3)
6949994a338SPaul Mackerras	mr	r1,r3
6959994a338SPaul Mackerras
6969994a338SPaul Mackerras	li	r0,0
6979994a338SPaul Mackerras	std	r0,16(r1)
6989994a338SPaul Mackerras
6999994a338SPaul Mackerras	/* save regs for local vars on new stack.
7009994a338SPaul Mackerras	 * yes, we won't go back, but ...
7019994a338SPaul Mackerras	 */
7029994a338SPaul Mackerras	std	r31,-8(r1)
7039994a338SPaul Mackerras	std	r30,-16(r1)
7049994a338SPaul Mackerras	std	r29,-24(r1)
7059994a338SPaul Mackerras	std	r28,-32(r1)
7069994a338SPaul Mackerras	std	r27,-40(r1)
7079994a338SPaul Mackerras	std	r26,-48(r1)
7089994a338SPaul Mackerras	std	r25,-56(r1)
7099994a338SPaul Mackerras
7109994a338SPaul Mackerras	stdu	r1,-112-64(r1)
7119994a338SPaul Mackerras
7129994a338SPaul Mackerras	/* save args into preserved regs */
7139994a338SPaul Mackerras	mr	r31,r3			/* newstack (both) */
7149994a338SPaul Mackerras	mr	r30,r4			/* start (real) */
7159994a338SPaul Mackerras	mr	r29,r5			/* image (virt) */
7169994a338SPaul Mackerras	mr	r28,r6			/* control, unused */
7179994a338SPaul Mackerras	mr	r27,r7			/* clear_all() fn desc */
7189994a338SPaul Mackerras	mr	r26,r8			/* spare */
7199994a338SPaul Mackerras	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
7209994a338SPaul Mackerras
7219994a338SPaul Mackerras	/* disable interrupts, we are overwriting kernel data next */
7229994a338SPaul Mackerras	mfmsr	r3
7239994a338SPaul Mackerras	rlwinm	r3,r3,0,17,15
7249994a338SPaul Mackerras	mtmsrd	r3,1
7259994a338SPaul Mackerras
7269994a338SPaul Mackerras	/* copy dest pages, flush whole dest image */
7279994a338SPaul Mackerras	mr	r3,r29
7289994a338SPaul Mackerras	bl	.kexec_copy_flush	/* (image) */
7299994a338SPaul Mackerras
7309994a338SPaul Mackerras	/* turn off mmu */
7319994a338SPaul Mackerras	bl	real_mode
7329994a338SPaul Mackerras
7339994a338SPaul Mackerras	/* clear out hardware hash page table and tlb */
7349994a338SPaul Mackerras	ld	r5,0(r27)		/* deref function descriptor */
7359994a338SPaul Mackerras	mtctr	r5
7368d950cb8SGeoff Levand	bctrl				/* ppc_md.hpte_clear_all(void); */
7379994a338SPaul Mackerras
7389994a338SPaul Mackerras/*
7399994a338SPaul Mackerras *   kexec image calling is:
7409994a338SPaul Mackerras *      the first 0x100 bytes of the entry point are copied to 0
7419994a338SPaul Mackerras *
7429994a338SPaul Mackerras *      all slaves branch to slave = 0x60 (absolute)
7439994a338SPaul Mackerras *              slave(phys_cpu_id);
7449994a338SPaul Mackerras *
7459994a338SPaul Mackerras *      master goes to start = entry point
7469994a338SPaul Mackerras *              start(phys_cpu_id, start, 0);
7479994a338SPaul Mackerras *
7489994a338SPaul Mackerras *
7499994a338SPaul Mackerras *   a wrapper is needed to call existing kernels, here is an approximate
7509994a338SPaul Mackerras *   description of one method:
7519994a338SPaul Mackerras *
7529994a338SPaul Mackerras * v2: (2.6.10)
7539994a338SPaul Mackerras *   start will be near the boot_block (maybe 0x100 bytes before it?)
7549994a338SPaul Mackerras *   it will have a 0x60, which will b to boot_block, where it will wait
7559994a338SPaul Mackerras *   and 0 will store phys into struct boot-block and load r3 from there,
7569994a338SPaul Mackerras *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
7579994a338SPaul Mackerras *
7589994a338SPaul Mackerras * v1: (2.6.9)
7599994a338SPaul Mackerras *    boot block will have all cpus scanning device tree to see if they
7609994a338SPaul Mackerras *    are the boot cpu ?????
7619994a338SPaul Mackerras *    other device tree differences (prop sizes, va vs pa, etc)...
7629994a338SPaul Mackerras */
7639994a338SPaul Mackerras
7649994a338SPaul Mackerras	/* copy  0x100 bytes starting at start to 0 */
7659994a338SPaul Mackerras	li	r3,0
7669994a338SPaul Mackerras	mr	r4,r30
7679994a338SPaul Mackerras	li	r5,0x100
7689994a338SPaul Mackerras	li	r6,0
7699994a338SPaul Mackerras	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
7709994a338SPaul Mackerras1:	/* assume normal blr return */
7719994a338SPaul Mackerras
7729994a338SPaul Mackerras	/* release other cpus to the new kernel secondary start at 0x60 */
7739994a338SPaul Mackerras	mflr	r5
7749994a338SPaul Mackerras	li	r6,1
7759994a338SPaul Mackerras	stw	r6,kexec_flag-1b(r5)
7769994a338SPaul Mackerras	mr	r3,r25	# my phys cpu
7779994a338SPaul Mackerras	mr	r4,r30	# start, aka phys mem offset
7789994a338SPaul Mackerras	mtlr	r4
7799994a338SPaul Mackerras	li	r5,0
7809994a338SPaul Mackerras	blr	/* image->start(physid, image->start, 0); */
7819994a338SPaul Mackerras#endif /* CONFIG_KEXEC */
782