xref: /openbmc/linux/arch/powerpc/kernel/misc_64.S (revision 1767c8f392857694899403a65942cc70b5b7d132)
19994a338SPaul Mackerras/*
29994a338SPaul Mackerras * This file contains miscellaneous low-level functions.
39994a338SPaul Mackerras *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
49994a338SPaul Mackerras *
59994a338SPaul Mackerras * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
69994a338SPaul Mackerras * and Paul Mackerras.
79994a338SPaul Mackerras * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
89994a338SPaul Mackerras * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
99994a338SPaul Mackerras *
109994a338SPaul Mackerras * This program is free software; you can redistribute it and/or
119994a338SPaul Mackerras * modify it under the terms of the GNU General Public License
129994a338SPaul Mackerras * as published by the Free Software Foundation; either version
139994a338SPaul Mackerras * 2 of the License, or (at your option) any later version.
149994a338SPaul Mackerras *
159994a338SPaul Mackerras */
169994a338SPaul Mackerras
179994a338SPaul Mackerras#include <linux/sys.h>
189994a338SPaul Mackerras#include <asm/unistd.h>
199994a338SPaul Mackerras#include <asm/errno.h>
209994a338SPaul Mackerras#include <asm/processor.h>
219994a338SPaul Mackerras#include <asm/page.h>
229994a338SPaul Mackerras#include <asm/cache.h>
239994a338SPaul Mackerras#include <asm/ppc_asm.h>
249994a338SPaul Mackerras#include <asm/asm-offsets.h>
259994a338SPaul Mackerras#include <asm/cputable.h>
266cb7bfebSDavid Gibson#include <asm/thread_info.h>
279994a338SPaul Mackerras
289994a338SPaul Mackerras	.text
299994a338SPaul Mackerras
309994a338SPaul Mackerras#ifdef CONFIG_IRQSTACKS
/*
 * call_do_softirq(r3 = top of per-cpu softirq stack)
 * Save LR in the caller's frame, push a back-chained frame at the top
 * of the new stack (stdu also advances r3 to that frame), switch r1
 * onto it, run __do_softirq, then unwind via the back chain at 0(r1).
 */
319994a338SPaul Mackerras_GLOBAL(call_do_softirq)
329994a338SPaul Mackerras	mflr	r0
339994a338SPaul Mackerras	std	r0,16(r1)
344ae2dcb6SKumar Gala	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
359994a338SPaul Mackerras	mr	r1,r3
369994a338SPaul Mackerras	bl	.__do_softirq
/* 0(r1) holds the old stack pointer stored by the stdu above */
379994a338SPaul Mackerras	ld	r1,0(r1)
389994a338SPaul Mackerras	ld	r0,16(r1)
399994a338SPaul Mackerras	mtlr	r0
409994a338SPaul Mackerras	blr
419994a338SPaul Mackerras
/*
 * call_handle_irq(r3, r4, r5 = top of IRQ stack, r6 = handler fn descriptor)
 * Load the handler's entry point from its function descriptor (first
 * doubleword at 0(r6)), switch onto the stack given in r5, and call the
 * handler via CTR with r3/r4 untouched as its arguments; then unwind
 * back to the original stack through the back chain.
 */
42b9e5b4e6SBenjamin Herrenschmidt_GLOBAL(call_handle_irq)
437d12e780SDavid Howells	ld	r8,0(r6)
449994a338SPaul Mackerras	mflr	r0
459994a338SPaul Mackerras	std	r0,16(r1)
46b9e5b4e6SBenjamin Herrenschmidt	mtctr	r8
474ae2dcb6SKumar Gala	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
487d12e780SDavid Howells	mr	r1,r5
49b9e5b4e6SBenjamin Herrenschmidt	bctrl
509994a338SPaul Mackerras	ld	r1,0(r1)
519994a338SPaul Mackerras	ld	r0,16(r1)
529994a338SPaul Mackerras	mtlr	r0
539994a338SPaul Mackerras	blr
549994a338SPaul Mackerras#endif /* CONFIG_IRQSTACKS */
559994a338SPaul Mackerras
569994a338SPaul Mackerras	.section	".toc","aw"
579994a338SPaul MackerrasPPC64_CACHES:
589994a338SPaul Mackerras	.tc		ppc64_caches[TC],ppc64_caches
599994a338SPaul Mackerras	.section	".text"
609994a338SPaul Mackerras
619994a338SPaul Mackerras/*
629994a338SPaul Mackerras * Write any modified data cache blocks out to memory
639994a338SPaul Mackerras * and invalidate the corresponding instruction cache blocks.
649994a338SPaul Mackerras *
659994a338SPaul Mackerras * flush_icache_range(unsigned long start, unsigned long stop)
669994a338SPaul Mackerras *
679994a338SPaul Mackerras *   flush all bytes from start through stop-1 inclusive
689994a338SPaul Mackerras */
699994a338SPaul Mackerras
/* _KPROBE places this routine in .kprobes.text so it cannot be probed */
 709994a338SPaul Mackerras_KPROBE(__flush_icache_range)
 719994a338SPaul Mackerras
 729994a338SPaul Mackerras/*
 739994a338SPaul Mackerras * Flush the data cache to memory
 749994a338SPaul Mackerras *
 759994a338SPaul Mackerras * Different systems have different cache line sizes
 769994a338SPaul Mackerras * and in some cases i-cache and d-cache line sizes differ from
 779994a338SPaul Mackerras * each other.
 789994a338SPaul Mackerras */
 799994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
 809994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
 819994a338SPaul Mackerras	addi	r5,r7,-1
 829994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
 839994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
 849994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
 859994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
 869994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
 879994a338SPaul Mackerras	beqlr				/* nothing to do? */
 889994a338SPaul Mackerras	mtctr	r8
 899994a338SPaul Mackerras1:	dcbst	0,r6
 909994a338SPaul Mackerras	add	r6,r6,r7
 919994a338SPaul Mackerras	bdnz	1b
 929994a338SPaul Mackerras	sync
 939994a338SPaul Mackerras
 949994a338SPaul Mackerras/* Now invalidate the instruction cache */
 959994a338SPaul Mackerras
 969994a338SPaul Mackerras	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
 979994a338SPaul Mackerras	addi	r5,r7,-1
 989994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
 999994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1009994a338SPaul Mackerras	add	r8,r8,r5
1019994a338SPaul Mackerras	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
1029994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1039994a338SPaul Mackerras	beqlr				/* nothing to do? */
1049994a338SPaul Mackerras	mtctr	r8
1059994a338SPaul Mackerras2:	icbi	0,r6
1069994a338SPaul Mackerras	add	r6,r6,r7
1079994a338SPaul Mackerras	bdnz	2b
1089994a338SPaul Mackerras	isync
1099994a338SPaul Mackerras	blr
/* .previous ends the .kprobes.text section started by _KPROBE */
1109994a338SPaul Mackerras	.previous .text
1119994a338SPaul Mackerras/*
1129994a338SPaul Mackerras * Like above, but only do the D-cache.
1139994a338SPaul Mackerras *
1149994a338SPaul Mackerras * flush_dcache_range(unsigned long start, unsigned long stop)
1159994a338SPaul Mackerras *
1169994a338SPaul Mackerras *    flush all bytes from start to stop-1 inclusive
1179994a338SPaul Mackerras */
1189994a338SPaul Mackerras_GLOBAL(flush_dcache_range)
1199994a338SPaul Mackerras
1209994a338SPaul Mackerras/*
1219994a338SPaul Mackerras * Flush the data cache to memory
1229994a338SPaul Mackerras *
1239994a338SPaul Mackerras * Different systems have different cache line sizes
1249994a338SPaul Mackerras */
1259994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
1269994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
1279994a338SPaul Mackerras	addi	r5,r7,-1
1289994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1299994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1309994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
1319994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
/* srw. sets cr0 so the beqlr below returns early for a zero line count */
1329994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1339994a338SPaul Mackerras	beqlr				/* nothing to do? */
1349994a338SPaul Mackerras	mtctr	r8
1359994a338SPaul Mackerras0:	dcbst	0,r6
1369994a338SPaul Mackerras	add	r6,r6,r7
1379994a338SPaul Mackerras	bdnz	0b
1389994a338SPaul Mackerras	sync
1399994a338SPaul Mackerras	blr
1409994a338SPaul Mackerras
1419994a338SPaul Mackerras/*
1429994a338SPaul Mackerras * Like above, but works on non-mapped physical addresses.
1439994a338SPaul Mackerras * Use only for non-LPAR setups ! It also assumes real mode
1449994a338SPaul Mackerras * is cacheable. Used for flushing out the DART before using
1459994a338SPaul Mackerras * it as uncacheable memory
1469994a338SPaul Mackerras *
1479994a338SPaul Mackerras * flush_dcache_phys_range(unsigned long start, unsigned long stop)
1489994a338SPaul Mackerras *
1499994a338SPaul Mackerras *    flush all bytes from start to stop-1 inclusive
1509994a338SPaul Mackerras */
1519994a338SPaul Mackerras_GLOBAL(flush_dcache_phys_range)
1529994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
1539994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
1549994a338SPaul Mackerras	addi	r5,r7,-1
1559994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1569994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1579994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
1589994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
1599994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1609994a338SPaul Mackerras	beqlr				/* nothing to do? */
/* r5 (the line mask) is dead here, so it is reused to hold the saved MSR */
1619994a338SPaul Mackerras	mfmsr	r5			/* Disable MMU Data Relocation */
/* ori+xori clears MSR_DR regardless of its previous state */
1629994a338SPaul Mackerras	ori	r0,r5,MSR_DR
1639994a338SPaul Mackerras	xori	r0,r0,MSR_DR
1649994a338SPaul Mackerras	sync
1659994a338SPaul Mackerras	mtmsr	r0
1669994a338SPaul Mackerras	sync
1679994a338SPaul Mackerras	isync
1689994a338SPaul Mackerras	mtctr	r8
1699994a338SPaul Mackerras0:	dcbst	0,r6
1709994a338SPaul Mackerras	add	r6,r6,r7
1719994a338SPaul Mackerras	bdnz	0b
1729994a338SPaul Mackerras	sync
1739994a338SPaul Mackerras	isync
1749994a338SPaul Mackerras	mtmsr	r5			/* Re-enable MMU Data Relocation */
1759994a338SPaul Mackerras	sync
1769994a338SPaul Mackerras	isync
1779994a338SPaul Mackerras	blr
1789994a338SPaul Mackerras
/*
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 * Flush AND invalidate (dcbf, vs. dcbst above) each dcache line in
 * [start, stop-1], so subsequent loads refetch from memory.
 */
1799994a338SPaul Mackerras_GLOBAL(flush_inval_dcache_range)
1809994a338SPaul Mackerras 	ld	r10,PPC64_CACHES@toc(r2)
1819994a338SPaul Mackerras	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
1829994a338SPaul Mackerras	addi	r5,r7,-1
1839994a338SPaul Mackerras	andc	r6,r3,r5		/* round low to line bdy */
1849994a338SPaul Mackerras	subf	r8,r6,r4		/* compute length */
1859994a338SPaul Mackerras	add	r8,r8,r5		/* ensure we get enough */
1869994a338SPaul Mackerras	lwz	r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
1879994a338SPaul Mackerras	srw.	r8,r8,r9		/* compute line count */
1889994a338SPaul Mackerras	beqlr				/* nothing to do? */
1899994a338SPaul Mackerras	sync
1909994a338SPaul Mackerras	isync
1919994a338SPaul Mackerras	mtctr	r8
1929994a338SPaul Mackerras0:	dcbf	0,r6
1939994a338SPaul Mackerras	add	r6,r6,r7
1949994a338SPaul Mackerras	bdnz	0b
1959994a338SPaul Mackerras	sync
1969994a338SPaul Mackerras	isync
1979994a338SPaul Mackerras	blr
1989994a338SPaul Mackerras
1999994a338SPaul Mackerras
2009994a338SPaul Mackerras/*
2019994a338SPaul Mackerras * Flush a particular page from the data cache to RAM.
2029994a338SPaul Mackerras * Note: this is necessary because the instruction cache does *not*
2039994a338SPaul Mackerras * snoop from the data cache.
2049994a338SPaul Mackerras *
2059994a338SPaul Mackerras *	void __flush_dcache_icache(void *page)
2069994a338SPaul Mackerras */
2079994a338SPaul Mackerras_GLOBAL(__flush_dcache_icache)
2089994a338SPaul Mackerras/*
2099994a338SPaul Mackerras * Flush the data cache to memory
2109994a338SPaul Mackerras *
2119994a338SPaul Mackerras * Different systems have different cache line sizes
2129994a338SPaul Mackerras */
2139994a338SPaul Mackerras
2149994a338SPaul Mackerras/* Flush the dcache */
2159994a338SPaul Mackerras 	ld	r7,PPC64_CACHES@toc(r2)
2169994a338SPaul Mackerras	clrrdi	r3,r3,PAGE_SHIFT           	    /* Page align */
2179994a338SPaul Mackerras	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
2189994a338SPaul Mackerras	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
/* r6 walks the dcache loop so r3 stays page-aligned for the icache loop */
2199994a338SPaul Mackerras	mr	r6,r3
2209994a338SPaul Mackerras	mtctr	r4
2219994a338SPaul Mackerras0:	dcbst	0,r6
2229994a338SPaul Mackerras	add	r6,r6,r5
2239994a338SPaul Mackerras	bdnz	0b
2249994a338SPaul Mackerras	sync
2259994a338SPaul Mackerras
2269994a338SPaul Mackerras/* Now invalidate the icache */
2279994a338SPaul Mackerras
2289994a338SPaul Mackerras	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
2299994a338SPaul Mackerras	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
2309994a338SPaul Mackerras	mtctr	r4
2319994a338SPaul Mackerras1:	icbi	0,r3
2329994a338SPaul Mackerras	add	r3,r3,r5
2339994a338SPaul Mackerras	bdnz	1b
2349994a338SPaul Mackerras	isync
2359994a338SPaul Mackerras	blr
2369994a338SPaul Mackerras
2373f639ee8SStephen Rothwell
2389994a338SPaul Mackerras#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
2399994a338SPaul Mackerras/*
2409994a338SPaul Mackerras * Do an IO access in real mode
2419994a338SPaul Mackerras */
/*
 * real_readb(r3 = address): load one byte with data translation off.
 * Turns MSR_DR off, flips a bit in the high word of HID4 (the two
 * rldicl 32-bit rotates bracket the ori so the 0x100 lands in the upper
 * half) -- NOTE(review): presumably the 970's real-mode cache-inhibit
 * control; confirm against the PPC970 HID4 layout -- does the access,
 * then restores HID4 and the MSR.  slbia after each HID4 write flushes
 * stale SLB translations.
 */
2429994a338SPaul Mackerras_GLOBAL(real_readb)
2439994a338SPaul Mackerras	mfmsr	r7
2449994a338SPaul Mackerras	ori	r0,r7,MSR_DR
2459994a338SPaul Mackerras	xori	r0,r0,MSR_DR
2469994a338SPaul Mackerras	sync
2479994a338SPaul Mackerras	mtmsrd	r0
2489994a338SPaul Mackerras	sync
2499994a338SPaul Mackerras	isync
2509994a338SPaul Mackerras	mfspr	r6,SPRN_HID4
2519994a338SPaul Mackerras	rldicl	r5,r6,32,0
2529994a338SPaul Mackerras	ori	r5,r5,0x100
2539994a338SPaul Mackerras	rldicl	r5,r5,32,0
2549994a338SPaul Mackerras	sync
2559994a338SPaul Mackerras	mtspr	SPRN_HID4,r5
2569994a338SPaul Mackerras	isync
2579994a338SPaul Mackerras	slbia
2589994a338SPaul Mackerras	isync
2599994a338SPaul Mackerras	lbz	r3,0(r3)
2609994a338SPaul Mackerras	sync
2619994a338SPaul Mackerras	mtspr	SPRN_HID4,r6
2629994a338SPaul Mackerras	isync
2639994a338SPaul Mackerras	slbia
2649994a338SPaul Mackerras	isync
2659994a338SPaul Mackerras	mtmsrd	r7
2669994a338SPaul Mackerras	sync
2679994a338SPaul Mackerras	isync
2689994a338SPaul Mackerras	blr
2699994a338SPaul Mackerras
2709994a338SPaul Mackerras	/*
2719994a338SPaul Mackerras * Do an IO access in real mode
2729994a338SPaul Mackerras */
/*
 * real_writeb(r3 = byte value, r4 = address): store one byte with data
 * translation off.  Same MSR_DR / HID4 dance as real_readb above, with
 * the access being a stb instead of an lbz.
 */
2739994a338SPaul Mackerras_GLOBAL(real_writeb)
2749994a338SPaul Mackerras	mfmsr	r7
2759994a338SPaul Mackerras	ori	r0,r7,MSR_DR
2769994a338SPaul Mackerras	xori	r0,r0,MSR_DR
2779994a338SPaul Mackerras	sync
2789994a338SPaul Mackerras	mtmsrd	r0
2799994a338SPaul Mackerras	sync
2809994a338SPaul Mackerras	isync
2819994a338SPaul Mackerras	mfspr	r6,SPRN_HID4
2829994a338SPaul Mackerras	rldicl	r5,r6,32,0
2839994a338SPaul Mackerras	ori	r5,r5,0x100
2849994a338SPaul Mackerras	rldicl	r5,r5,32,0
2859994a338SPaul Mackerras	sync
2869994a338SPaul Mackerras	mtspr	SPRN_HID4,r5
2879994a338SPaul Mackerras	isync
2889994a338SPaul Mackerras	slbia
2899994a338SPaul Mackerras	isync
2909994a338SPaul Mackerras	stb	r3,0(r4)
2919994a338SPaul Mackerras	sync
2929994a338SPaul Mackerras	mtspr	SPRN_HID4,r6
2939994a338SPaul Mackerras	isync
2949994a338SPaul Mackerras	slbia
2959994a338SPaul Mackerras	isync
2969994a338SPaul Mackerras	mtmsrd	r7
2979994a338SPaul Mackerras	sync
2989994a338SPaul Mackerras	isync
2999994a338SPaul Mackerras	blr
3009994a338SPaul Mackerras#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
3019994a338SPaul Mackerras
30239c870d5SOlof Johansson#ifdef CONFIG_PPC_PASEMI
30339c870d5SOlof Johansson
30439c870d5SOlof Johansson/* No support in all binutils for these yet, so use defines */
/* Hand-encoded lbzcix / stbcix (cache-inhibited byte load/store indexed) */
30539c870d5SOlof Johansson#define LBZCIX(RT,RA,RB)  .long (0x7c0006aa|(RT<<21)|(RA<<16)|(RB << 11))
30639c870d5SOlof Johansson#define STBCIX(RS,RA,RB)  .long (0x7c0007aa|(RS<<21)|(RA<<16)|(RB << 11))
30739c870d5SOlof Johansson
30839c870d5SOlof Johansson
/*
 * real_205_readb(r3 = address): cache-inhibited real-mode byte load for
 * PA Semi parts, using the lbzcix encoding defined above instead of the
 * HID4 trick used by real_readb.
 */
30939c870d5SOlof Johansson_GLOBAL(real_205_readb)
31039c870d5SOlof Johansson	mfmsr	r7
31139c870d5SOlof Johansson	ori	r0,r7,MSR_DR
31239c870d5SOlof Johansson	xori	r0,r0,MSR_DR
31339c870d5SOlof Johansson	sync
31439c870d5SOlof Johansson	mtmsrd	r0
31539c870d5SOlof Johansson	sync
31639c870d5SOlof Johansson	isync
31739c870d5SOlof Johansson	LBZCIX(r3,0,r3)
31839c870d5SOlof Johansson	isync
31939c870d5SOlof Johansson	mtmsrd	r7
32039c870d5SOlof Johansson	sync
32139c870d5SOlof Johansson	isync
32239c870d5SOlof Johansson	blr
32339c870d5SOlof Johansson
/*
 * real_205_writeb(r3 = byte value, r4 = address): cache-inhibited
 * real-mode byte store, counterpart of real_205_readb above.
 */
32439c870d5SOlof Johansson_GLOBAL(real_205_writeb)
32539c870d5SOlof Johansson	mfmsr	r7
32639c870d5SOlof Johansson	ori	r0,r7,MSR_DR
32739c870d5SOlof Johansson	xori	r0,r0,MSR_DR
32839c870d5SOlof Johansson	sync
32939c870d5SOlof Johansson	mtmsrd	r0
33039c870d5SOlof Johansson	sync
33139c870d5SOlof Johansson	isync
33239c870d5SOlof Johansson	STBCIX(r3,0,r4)
33339c870d5SOlof Johansson	isync
33439c870d5SOlof Johansson	mtmsrd	r7
33539c870d5SOlof Johansson	sync
33639c870d5SOlof Johansson	isync
33739c870d5SOlof Johansson	blr
33839c870d5SOlof Johansson
33939c870d5SOlof Johansson#endif /* CONFIG_PPC_PASEMI */
34039c870d5SOlof Johansson
34139c870d5SOlof Johansson
342127efeb2SStephen Rothwell#ifdef CONFIG_CPU_FREQ_PMAC64
3439994a338SPaul Mackerras/*
3444350147aSBenjamin Herrenschmidt * SCOM access functions for 970 (FX only for now)
3454350147aSBenjamin Herrenschmidt *
3464350147aSBenjamin Herrenschmidt * unsigned long scom970_read(unsigned int address);
3474350147aSBenjamin Herrenschmidt * void scom970_write(unsigned int address, unsigned long value);
3484350147aSBenjamin Herrenschmidt *
3494350147aSBenjamin Herrenschmidt * The address passed in is the 24 bits register address. This code
3504350147aSBenjamin Herrenschmidt * is 970 specific and will not check the status bits, so you should
3514350147aSBenjamin Herrenschmidt * know what you are doing.
3524350147aSBenjamin Herrenschmidt */
3534350147aSBenjamin Herrenschmidt_GLOBAL(scom970_read)
3544350147aSBenjamin Herrenschmidt	/* interrupts off */
3554350147aSBenjamin Herrenschmidt	mfmsr	r4
3564350147aSBenjamin Herrenschmidt	ori	r0,r4,MSR_EE
3574350147aSBenjamin Herrenschmidt	xori	r0,r0,MSR_EE
3584350147aSBenjamin Herrenschmidt	mtmsrd	r0,1
3594350147aSBenjamin Herrenschmidt
3604350147aSBenjamin Herrenschmidt	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
3614350147aSBenjamin Herrenschmidt	 * (including parity). On current CPUs they must be 0'd,
3624350147aSBenjamin Herrenschmidt	 * and finally or in RW bit
3634350147aSBenjamin Herrenschmidt	 */
3644350147aSBenjamin Herrenschmidt	rlwinm	r3,r3,8,0,15
3654350147aSBenjamin Herrenschmidt	ori	r3,r3,0x8000
3664350147aSBenjamin Herrenschmidt
3674350147aSBenjamin Herrenschmidt	/* do the actual scom read */
3684350147aSBenjamin Herrenschmidt	sync
3694350147aSBenjamin Herrenschmidt	mtspr	SPRN_SCOMC,r3
3704350147aSBenjamin Herrenschmidt	isync
3714350147aSBenjamin Herrenschmidt	mfspr	r3,SPRN_SCOMD
3724350147aSBenjamin Herrenschmidt	isync
/* read back SCOMC to complete the transaction; status bits are ignored */
3734350147aSBenjamin Herrenschmidt	mfspr	r0,SPRN_SCOMC
3744350147aSBenjamin Herrenschmidt	isync
3754350147aSBenjamin Herrenschmidt
3764350147aSBenjamin Herrenschmidt	/* XXX:	fixup result on some buggy 970's (ouch ! we lost a bit, bah
3774350147aSBenjamin Herrenschmidt	 * that's the best we can do). Not implemented yet as we don't use
3784350147aSBenjamin Herrenschmidt	 * the scom on any of the bogus CPUs yet, but may have to be done
3794350147aSBenjamin Herrenschmidt	 * ultimately
3804350147aSBenjamin Herrenschmidt	 */
3814350147aSBenjamin Herrenschmidt
3824350147aSBenjamin Herrenschmidt	/* restore interrupts */
3834350147aSBenjamin Herrenschmidt	mtmsrd	r4,1
3844350147aSBenjamin Herrenschmidt	blr
3854350147aSBenjamin Herrenschmidt
3864350147aSBenjamin Herrenschmidt
3874350147aSBenjamin Herrenschmidt_GLOBAL(scom970_write)
3884350147aSBenjamin Herrenschmidt	/* interrupts off */
3894350147aSBenjamin Herrenschmidt	mfmsr	r5
3904350147aSBenjamin Herrenschmidt	ori	r0,r5,MSR_EE
3914350147aSBenjamin Herrenschmidt	xori	r0,r0,MSR_EE
3924350147aSBenjamin Herrenschmidt	mtmsrd	r0,1
3934350147aSBenjamin Herrenschmidt
3944350147aSBenjamin Herrenschmidt	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
3954350147aSBenjamin Herrenschmidt	 * (including parity). On current CPUs they must be 0'd.
3964350147aSBenjamin Herrenschmidt	 */
3974350147aSBenjamin Herrenschmidt
3984350147aSBenjamin Herrenschmidt	rlwinm	r3,r3,8,0,15
3994350147aSBenjamin Herrenschmidt
4004350147aSBenjamin Herrenschmidt	sync
4014350147aSBenjamin Herrenschmidt	mtspr	SPRN_SCOMD,r4      /* write data */
4024350147aSBenjamin Herrenschmidt	isync
4034350147aSBenjamin Herrenschmidt	mtspr	SPRN_SCOMC,r3      /* write command */
4044350147aSBenjamin Herrenschmidt	isync
/* bare "3" is register r3: read SCOMC back to complete the transaction */
4054350147aSBenjamin Herrenschmidt	mfspr	3,SPRN_SCOMC
4064350147aSBenjamin Herrenschmidt	isync
4074350147aSBenjamin Herrenschmidt
4084350147aSBenjamin Herrenschmidt	/* restore interrupts */
4094350147aSBenjamin Herrenschmidt	mtmsrd	r5,1
4104350147aSBenjamin Herrenschmidt	blr
411127efeb2SStephen Rothwell#endif /* CONFIG_CPU_FREQ_PMAC64 */
4124350147aSBenjamin Herrenschmidt
4134350147aSBenjamin Herrenschmidt
4144350147aSBenjamin Herrenschmidt/*
4159994a338SPaul Mackerras * Create a kernel thread
4169994a338SPaul Mackerras *   kernel_thread(fn, arg, flags)
4179994a338SPaul Mackerras */
4189994a338SPaul Mackerras_GLOBAL(kernel_thread)
/* r29/r30 are callee-saved; stash fn and arg across the clone syscall */
4199994a338SPaul Mackerras	std	r29,-24(r1)
4209994a338SPaul Mackerras	std	r30,-16(r1)
4219994a338SPaul Mackerras	stdu	r1,-STACK_FRAME_OVERHEAD(r1)
4229994a338SPaul Mackerras	mr	r29,r3
4239994a338SPaul Mackerras	mr	r30,r4
4249994a338SPaul Mackerras	ori	r3,r5,CLONE_VM	/* flags */
4259994a338SPaul Mackerras	oris	r3,r3,(CLONE_UNTRACED>>16)
4269994a338SPaul Mackerras	li	r4,0		/* new sp (unused) */
4279994a338SPaul Mackerras	li	r0,__NR_clone
4289994a338SPaul Mackerras	sc
42941c2e949SJosh Poimboeuf	bns+	1f		/* did system call indicate error? */
43041c2e949SJosh Poimboeuf	neg	r3,r3		/* if so, make return code negative */
43141c2e949SJosh Poimboeuf1:	cmpdi	0,r3,0		/* parent or child? */
43241c2e949SJosh Poimboeuf	bne	2f		/* return if parent */
/* child: fn is an ELFv1 function descriptor -- entry at 0(r29), TOC at 8(r29) */
4339994a338SPaul Mackerras	li	r0,0
4349994a338SPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
4359994a338SPaul Mackerras	ld	r2,8(r29)
4369994a338SPaul Mackerras	ld	r29,0(r29)
4379994a338SPaul Mackerras	mtlr	r29              /* fn addr in lr */
4389994a338SPaul Mackerras	mr	r3,r30	        /* load arg and call fn */
4399994a338SPaul Mackerras	blrl
4409994a338SPaul Mackerras	li	r0,__NR_exit	/* exit after child exits */
4419994a338SPaul Mackerras        li	r3,0
4429994a338SPaul Mackerras	sc
44341c2e949SJosh Poimboeuf2:	addi	r1,r1,STACK_FRAME_OVERHEAD
4449994a338SPaul Mackerras	ld	r29,-24(r1)
4459994a338SPaul Mackerras	ld	r30,-16(r1)
4469994a338SPaul Mackerras	blr
4479994a338SPaul Mackerras
4489994a338SPaul Mackerras/*
4499994a338SPaul Mackerras * disable_kernel_fp()
4509994a338SPaul Mackerras * Disable the FPU.
4519994a338SPaul Mackerras */
4529994a338SPaul Mackerras_GLOBAL(disable_kernel_fp)
4539994a338SPaul Mackerras	mfmsr	r3
/* the rldicl pair rotates MSR_FP to bit 63, truncates it, rotates back:
 * i.e. clear just the MSR_FP bit without needing a 64-bit mask constant */
4549994a338SPaul Mackerras	rldicl	r0,r3,(63-MSR_FP_LG),1
4559994a338SPaul Mackerras	rldicl	r3,r0,(MSR_FP_LG+1),0
4569994a338SPaul Mackerras	mtmsrd	r3			/* disable use of fpu now */
4579994a338SPaul Mackerras	isync
4589994a338SPaul Mackerras	blr
4599994a338SPaul Mackerras
4609994a338SPaul Mackerras#ifdef CONFIG_ALTIVEC
4619994a338SPaul Mackerras
4629994a338SPaul Mackerras#if 0 /* this has no callers for now */
4639994a338SPaul Mackerras/*
4649994a338SPaul Mackerras * disable_kernel_altivec()
4659994a338SPaul Mackerras * Disable the VMX.
4669994a338SPaul Mackerras */
4679994a338SPaul Mackerras_GLOBAL(disable_kernel_altivec)
4689994a338SPaul Mackerras	mfmsr	r3
/* same rotate-truncate-rotate trick as disable_kernel_fp, for MSR_VEC */
4699994a338SPaul Mackerras	rldicl	r0,r3,(63-MSR_VEC_LG),1
4709994a338SPaul Mackerras	rldicl	r3,r0,(MSR_VEC_LG+1),0
4719994a338SPaul Mackerras	mtmsrd	r3			/* disable use of VMX now */
4729994a338SPaul Mackerras	isync
4739994a338SPaul Mackerras	blr
4749994a338SPaul Mackerras#endif /* 0 */
4759994a338SPaul Mackerras
4769994a338SPaul Mackerras/*
4779994a338SPaul Mackerras * giveup_altivec(tsk)
4789994a338SPaul Mackerras * Disable VMX for the task given as the argument,
4799994a338SPaul Mackerras * and save the vector registers in its thread_struct.
4809994a338SPaul Mackerras * Enables the VMX for use in the kernel on return.
4819994a338SPaul Mackerras */
4829994a338SPaul Mackerras_GLOBAL(giveup_altivec)
4839994a338SPaul Mackerras	mfmsr	r5
4849994a338SPaul Mackerras	oris	r5,r5,MSR_VEC@h
4859994a338SPaul Mackerras	mtmsrd	r5			/* enable use of VMX now */
4869994a338SPaul Mackerras	isync
4879994a338SPaul Mackerras	cmpdi	0,r3,0
4889994a338SPaul Mackerras	beqlr-				/* if no previous owner, done */
4899994a338SPaul Mackerras	addi	r3,r3,THREAD		/* want THREAD of task */
4909994a338SPaul Mackerras	ld	r5,PT_REGS(r3)
/* cr0 from this cmpdi survives until the beq below -- the intervening
 * vector save instructions do not touch the condition register */
4919994a338SPaul Mackerras	cmpdi	0,r5,0
4929994a338SPaul Mackerras	SAVE_32VRS(0,r4,r3)
4939994a338SPaul Mackerras	mfvscr	vr0
4949994a338SPaul Mackerras	li	r4,THREAD_VSCR
4959994a338SPaul Mackerras	stvx	vr0,r4,r3
4969994a338SPaul Mackerras	beq	1f
4979994a338SPaul Mackerras	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
4989994a338SPaul Mackerras	lis	r3,MSR_VEC@h
4999994a338SPaul Mackerras	andc	r4,r4,r3		/* disable VMX for previous task */
5009994a338SPaul Mackerras	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
5019994a338SPaul Mackerras1:
5029994a338SPaul Mackerras#ifndef CONFIG_SMP
5039994a338SPaul Mackerras	li	r5,0
5049994a338SPaul Mackerras	ld	r4,last_task_used_altivec@got(r2)
5059994a338SPaul Mackerras	std	r5,0(r4)
5069994a338SPaul Mackerras#endif /* CONFIG_SMP */
5079994a338SPaul Mackerras	blr
5089994a338SPaul Mackerras
5099994a338SPaul Mackerras#endif /* CONFIG_ALTIVEC */
5109994a338SPaul Mackerras
511ce48b210SMichael Neuling#ifdef CONFIG_VSX
512ce48b210SMichael Neuling/*
5137c292170SMichael Neuling * __giveup_vsx(tsk)
5147c292170SMichael Neuling * Disable VSX for the task given as the argument.
5157c292170SMichael Neuling * Does NOT save vsx registers.
516ce48b210SMichael Neuling * Enables the VSX for use in the kernel on return.
517ce48b210SMichael Neuling */
5187c292170SMichael Neuling_GLOBAL(__giveup_vsx)
519ce48b210SMichael Neuling	mfmsr	r5
520ce48b210SMichael Neuling	oris	r5,r5,MSR_VSX@h
521ce48b210SMichael Neuling	mtmsrd	r5			/* enable use of VSX now */
522ce48b210SMichael Neuling	isync
523ce48b210SMichael Neuling
524ce48b210SMichael Neuling	cmpdi	0,r3,0
525ce48b210SMichael Neuling	beqlr-				/* if no previous owner, done */
526ce48b210SMichael Neuling	addi	r3,r3,THREAD		/* want THREAD of task */
527ce48b210SMichael Neuling	ld	r5,PT_REGS(r3)
/* no register save here (see header comment); only the task's saved
 * MSR is updated when it has a pt_regs frame */
528ce48b210SMichael Neuling	cmpdi	0,r5,0
529ce48b210SMichael Neuling	beq	1f
530ce48b210SMichael Neuling	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
531ce48b210SMichael Neuling	lis	r3,MSR_VSX@h
532ce48b210SMichael Neuling	andc	r4,r4,r3		/* disable VSX for previous task */
533ce48b210SMichael Neuling	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
534ce48b210SMichael Neuling1:
535ce48b210SMichael Neuling#ifndef CONFIG_SMP
536ce48b210SMichael Neuling	li	r5,0
537ce48b210SMichael Neuling	ld	r4,last_task_used_vsx@got(r2)
538ce48b210SMichael Neuling	std	r5,0(r4)
539ce48b210SMichael Neuling#endif /* CONFIG_SMP */
540ce48b210SMichael Neuling	blr
541ce48b210SMichael Neuling
542ce48b210SMichael Neuling#endif /* CONFIG_VSX */
543ce48b210SMichael Neuling
5449994a338SPaul Mackerras/* kexec_wait(phys_cpu)
5459994a338SPaul Mackerras *
5469994a338SPaul Mackerras * wait for the flag to change, indicating this kernel is going away but
5479994a338SPaul Mackerras * the slave code for the next one is at addresses 0 to 100.
5489994a338SPaul Mackerras *
5499994a338SPaul Mackerras * This is used by all slaves.
5509994a338SPaul Mackerras *
5519994a338SPaul Mackerras * Physical (hardware) cpu id should be in r3.
5529994a338SPaul Mackerras */
5539994a338SPaul Mackerras_GLOBAL(kexec_wait)
/* bl/mflr pair computes the runtime address of kexec_flag PC-relatively */
5549994a338SPaul Mackerras	bl	1f
5559994a338SPaul Mackerras1:	mflr	r5
5569994a338SPaul Mackerras	addi	r5,r5,kexec_flag-1b
5579994a338SPaul Mackerras
5589994a338SPaul Mackerras99:	HMT_LOW
5599994a338SPaul Mackerras#ifdef CONFIG_KEXEC		/* use no memory without kexec */
5609994a338SPaul Mackerras	lwz	r4,0(r5)
5619994a338SPaul Mackerras	cmpwi	0,r4,0
/* bnea = branch to ABSOLUTE address 0x60, the new kernel's secondary entry */
5629994a338SPaul Mackerras	bnea	0x60
5639994a338SPaul Mackerras#endif
5649994a338SPaul Mackerras	b	99b
5659994a338SPaul Mackerras
5669994a338SPaul Mackerras/* this can be in text because we won't change it until we are
5679994a338SPaul Mackerras * running in real anyways
5689994a338SPaul Mackerras */
5699994a338SPaul Mackerraskexec_flag:
5709994a338SPaul Mackerras	.long	0
5719994a338SPaul Mackerras
5729994a338SPaul Mackerras
5739994a338SPaul Mackerras#ifdef CONFIG_KEXEC
5749994a338SPaul Mackerras
5759994a338SPaul Mackerras/* kexec_smp_wait(void)
5769994a338SPaul Mackerras *
5779994a338SPaul Mackerras * call with interrupts off
5789994a338SPaul Mackerras * note: this is a terminal routine, it does not save lr
5799994a338SPaul Mackerras *
5809994a338SPaul Mackerras * get phys id from paca
5819994a338SPaul Mackerras * set paca id to -1 to say we got here
5829994a338SPaul Mackerras * switch to real mode
5839994a338SPaul Mackerras * join other cpus in kexec_wait(phys_id)
5849994a338SPaul Mackerras */
5859994a338SPaul Mackerras_GLOBAL(kexec_smp_wait)
5869994a338SPaul Mackerras	lhz	r3,PACAHWCPUID(r13)
5879994a338SPaul Mackerras	li	r4,-1
5889994a338SPaul Mackerras	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
5899994a338SPaul Mackerras	bl	real_mode
/* tail-call: kexec_wait never returns (r3 = our phys cpu id) */
5909994a338SPaul Mackerras	b	.kexec_wait
5919994a338SPaul Mackerras
5929994a338SPaul Mackerras/*
5939994a338SPaul Mackerras * switch to real mode (turn mmu off)
5949994a338SPaul Mackerras * we use the early kernel trick that the hardware ignores bits
5959994a338SPaul Mackerras * 0 and 1 (big endian) of the effective address in real mode
5969994a338SPaul Mackerras *
5979994a338SPaul Mackerras * don't overwrite r3 here, it is live for kexec_wait above.
5989994a338SPaul Mackerras */
5999994a338SPaul Mackerrasreal_mode:	/* assume normal blr return */
6009994a338SPaul Mackerras1:	li	r9,MSR_RI
6019994a338SPaul Mackerras	li	r10,MSR_DR|MSR_IR
6029994a338SPaul Mackerras	mflr	r11		/* return address to SRR0 */
6039994a338SPaul Mackerras	mfmsr	r12
/* r9 = MSR with RI clear (unrecoverable window), r10 = MSR with IR/DR clear */
6049994a338SPaul Mackerras	andc	r9,r12,r9
6059994a338SPaul Mackerras	andc	r10,r12,r10
6069994a338SPaul Mackerras
6079994a338SPaul Mackerras	mtmsrd	r9,1
6089994a338SPaul Mackerras	mtspr	SPRN_SRR1,r10
6099994a338SPaul Mackerras	mtspr	SPRN_SRR0,r11
/* rfid loads MSR from SRR1 (translation off) and jumps to SRR0 = caller */
6109994a338SPaul Mackerras	rfid
6119994a338SPaul Mackerras
6129994a338SPaul Mackerras
6139994a338SPaul Mackerras/*
614*1767c8f3SMilton Miller * kexec_sequence(newstack, start, image, control, clear_all())
6159994a338SPaul Mackerras *
6169994a338SPaul Mackerras * does the grungy work with stack switching and real mode switches
6179994a338SPaul Mackerras * also does simple calls to other code
6189994a338SPaul Mackerras */
6199994a338SPaul Mackerras
6209994a338SPaul Mackerras_GLOBAL(kexec_sequence)
6219994a338SPaul Mackerras	mflr	r0
6229994a338SPaul Mackerras	std	r0,16(r1)
6239994a338SPaul Mackerras
6249994a338SPaul Mackerras	/* switch stacks to newstack -- &kexec_stack.stack */
6254ae2dcb6SKumar Gala	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
6269994a338SPaul Mackerras	mr	r1,r3
6279994a338SPaul Mackerras
6289994a338SPaul Mackerras	li	r0,0
6299994a338SPaul Mackerras	std	r0,16(r1)
6309994a338SPaul Mackerras
6319994a338SPaul Mackerras	/* save regs for local vars on new stack.
6329994a338SPaul Mackerras	 * yes, we won't go back, but ...
6339994a338SPaul Mackerras	 */
6349994a338SPaul Mackerras	std	r31,-8(r1)
6359994a338SPaul Mackerras	std	r30,-16(r1)
6369994a338SPaul Mackerras	std	r29,-24(r1)
6379994a338SPaul Mackerras	std	r28,-32(r1)
6389994a338SPaul Mackerras	std	r27,-40(r1)
6399994a338SPaul Mackerras	std	r26,-48(r1)
6409994a338SPaul Mackerras	std	r25,-56(r1)
6419994a338SPaul Mackerras
6424ae2dcb6SKumar Gala	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)
6439994a338SPaul Mackerras
6449994a338SPaul Mackerras	/* save args into preserved regs */
6459994a338SPaul Mackerras	mr	r31,r3			/* newstack (both) */
6469994a338SPaul Mackerras	mr	r30,r4			/* start (real) */
6479994a338SPaul Mackerras	mr	r29,r5			/* image (virt) */
6489994a338SPaul Mackerras	mr	r28,r6			/* control, unused */
6499994a338SPaul Mackerras	mr	r27,r7			/* clear_all() fn desc */
650*1767c8f3SMilton Miller	mr	r26,r8			/* spare */
6519994a338SPaul Mackerras	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
6529994a338SPaul Mackerras
6539994a338SPaul Mackerras	/* disable interrupts, we are overwriting kernel data next */
/* rlwinm mask 17..15 wraps around, clearing only bit 16 (MSR_EE) */
6549994a338SPaul Mackerras	mfmsr	r3
6559994a338SPaul Mackerras	rlwinm	r3,r3,0,17,15
6569994a338SPaul Mackerras	mtmsrd	r3,1
6579994a338SPaul Mackerras
6589994a338SPaul Mackerras	/* copy dest pages, flush whole dest image */
6599994a338SPaul Mackerras	mr	r3,r29
6609994a338SPaul Mackerras	bl	.kexec_copy_flush	/* (image) */
6619994a338SPaul Mackerras
6629994a338SPaul Mackerras	/* turn off mmu */
6639994a338SPaul Mackerras	bl	real_mode
6649994a338SPaul Mackerras
665ee46a90bSMilton Miller	/* copy  0x100 bytes starting at start to 0 */
666ee46a90bSMilton Miller	li	r3,0
667ee46a90bSMilton Miller	mr	r4,r30		/* start, aka phys mem offset */
668ee46a90bSMilton Miller	li	r5,0x100
669ee46a90bSMilton Miller	li	r6,0
670ee46a90bSMilton Miller	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
671ee46a90bSMilton Miller1:	/* assume normal blr return */
672ee46a90bSMilton Miller
673ee46a90bSMilton Miller	/* release other cpus to the new kernel secondary start at 0x60 */
/* LR still holds the address of label 1 from the bl above; bare (5) is
 * register r5, so this stores to r5 + (kexec_flag - 1b) */
674ee46a90bSMilton Miller	mflr	r5
675ee46a90bSMilton Miller	li	r6,1
676ee46a90bSMilton Miller	stw	r6,kexec_flag-1b(5)
677ee46a90bSMilton Miller
6789994a338SPaul Mackerras	/* clear out hardware hash page table and tlb */
6799994a338SPaul Mackerras	ld	r5,0(r27)		/* deref function descriptor */
6809994a338SPaul Mackerras	mtctr	r5
6818d950cb8SGeoff Levand	bctrl				/* ppc_md.hpte_clear_all(void); */
6829994a338SPaul Mackerras
6839994a338SPaul Mackerras/*
6849994a338SPaul Mackerras *   kexec image calling is:
6859994a338SPaul Mackerras *      the first 0x100 bytes of the entry point are copied to 0
6869994a338SPaul Mackerras *
6879994a338SPaul Mackerras *      all slaves branch to slave = 0x60 (absolute)
6889994a338SPaul Mackerras *              slave(phys_cpu_id);
6899994a338SPaul Mackerras *
6909994a338SPaul Mackerras *      master goes to start = entry point
6919994a338SPaul Mackerras *              start(phys_cpu_id, start, 0);
6929994a338SPaul Mackerras *
6939994a338SPaul Mackerras *
6949994a338SPaul Mackerras *   a wrapper is needed to call existing kernels, here is an approximate
6959994a338SPaul Mackerras *   description of one method:
6969994a338SPaul Mackerras *
6979994a338SPaul Mackerras * v2: (2.6.10)
6989994a338SPaul Mackerras *   start will be near the boot_block (maybe 0x100 bytes before it?)
6999994a338SPaul Mackerras *   it will have a 0x60, which will b to boot_block, where it will wait
7009994a338SPaul Mackerras *   and 0 will store phys into struct boot-block and load r3 from there,
7019994a338SPaul Mackerras *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
7029994a338SPaul Mackerras *
7039994a338SPaul Mackerras * v1: (2.6.9)
7049994a338SPaul Mackerras *    boot block will have all cpus scanning device tree to see if they
7059994a338SPaul Mackerras *    are the boot cpu ?????
7069994a338SPaul Mackerras *    other device tree differences (prop sizes, va vs pa, etc)...
7079994a338SPaul Mackerras */
7089994a338SPaul Mackerras	mr	r3,r25	# my phys cpu
7099994a338SPaul Mackerras	mr	r4,r30	# start, aka phys mem offset
7109994a338SPaul Mackerras	mtlr	4	# bare 4 is register r4: jump target = start
7119994a338SPaul Mackerras	li	r5,0
712*1767c8f3SMilton Miller	blr	/* image->start(physid, image->start, 0); */
7139994a338SPaul Mackerras#endif /* CONFIG_KEXEC */
714