/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>

	.text

_GLOBAL(get_msr)
	mfmsr	r3
	blr

_GLOBAL(get_srr0)
	mfsrr0	r3
	blr

_GLOBAL(get_srr1)
	mfsrr1	r3
	blr

#ifdef CONFIG_IRQSTACKS
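/*
 * Helpers for running interrupt work on a separate stack.  Both save LR,
 * switch r1 onto the stack whose thread_info is passed in (r3 here, r5
 * below), make the call, then restore r1 and LR from the values saved in
 * the first frame of the new stack.  A rough C-level view (illustrative
 * only, not part of this file):
 *
 *	call_do_softirq(tp)			runs __do_softirq() on stack tp
 *	call_handle_irq(irq, desc, tp, handler)	runs (*handler)(irq, desc)
 *						on stack tp; the handler's
 *						function descriptor is in r6
 */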
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-112(r3)
	mr	r1,r3
	bl	.__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	ld	r8,0(r6)
	mflr	r0
	std	r0,16(r1)
	mtctr	r8
	stdu	r1,THREAD_SIZE-112(r5)
	mr	r1,r5
	bctrl
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */

	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */
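/*
 * Illustrative call (a sketch, not taken from this file): a caller that
 * has just patched kernel instructions in place would typically do
 *
 *	flush_icache_range((unsigned long)p, (unsigned long)p + len);
 *
 * Both ends of the range are rounded out to cache-line boundaries below,
 * so partial lines at either end are still flushed and invalidated.
 */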

_KPROBE(__flush_icache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get cache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
	.previous .text
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr

/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups!  It also assumes real mode
 * is cacheable.  Used for flushing out the DART before using
 * it as uncacheable memory.
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr

_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr


/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT	/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr
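
/*
 * Typical call path (illustrative): flush_dcache_icache_page() passes in
 * the kernel mapping of a page whose contents were just written (user
 * text, module code, etc.), since these CPUs do not keep the instruction
 * cache coherent with the data cache on their own.
 */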


#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

/* Not all binutils versions support these instructions yet, so use defines */
#define LBZCIX(RT,RA,RB)  .long (0x7c0006aa|(RT<<21)|(RA<<16)|(RB << 11))
#define STBCIX(RS,RA,RB)  .long (0x7c0007aa|(RS<<21)|(RA<<16)|(RB << 11))
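
/*
 * Background note: these .long encodings correspond to the ISA 2.05
 * cache-inhibited byte load/store instructions (lbzcix/stbcix), with
 * RT/RS, RA and RB in the usual X-form fields.  The mnemonic names are
 * informational only; the encodings above are what the assembler emits.
 */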


_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(r3,0,r3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(r3,0,r4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#ifdef CONFIG_CPU_FREQ_PMAC64
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address.  This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
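/*
 * Illustrative use (a sketch, not taken from this file): a caller such as
 * the G5 cpufreq code would typically do a read-modify-write of a power
 * control register, e.g.
 *
 *	val = scom970_read(addr);		addr is a placeholder
 *	scom970_write(addr, val | some_bit);
 *
 * Each call disables external interrupts internally and restores the
 * previous MSR before returning.
 */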
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its low
	 * 8 bits (including parity).  On current CPUs they must be 0'd,
	 * and finally OR in the read/write bit.
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fix up the result on some buggy 970's (ouch! we lost a bit,
	 * but that's the best we can do).  Not implemented yet as we don't
	 * use the scom on any of the bogus CPUs yet, but it may have to be
	 * done eventually.
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its low
	 * 8 bits (including parity).  On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4      /* write data */
	isync
	mtspr	SPRN_SCOMC,r3      /* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 */


/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	std	r29,-24(r1)
	std	r30,-16(r1)
	stdu	r1,-STACK_FRAME_OVERHEAD(r1)
	mr	r29,r3
	mr	r30,r4
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,(CLONE_UNTRACED>>16)
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpdi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
	ld	r2,8(r29)
	ld	r29,0(r29)
	mtlr	r29		/* fn addr in lr */
	mr	r3,r30		/* load arg and call fn */
	blrl
	li	r0,__NR_exit	/* exit after child exits */
	li	r3,0
	sc
1:	addi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r29,-24(r1)
	ld	r30,-16(r1)
	blr
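
/*
 * Roughly what the code above amounts to in C (illustrative only):
 *
 *	pid = sys_clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *	if (pid)
 *		return pid;	parent path: result of clone
 *	fn(arg);		child path: call fn through its descriptor
 *	sys_exit(0);
 *
 * fn and arg survive the clone in the non-volatile registers r29/r30.
 */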

/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1	/* rotate MSR_FP into bit 0 and clear it */
	rldicl	r3,r0,(MSR_FP_LG+1),0	/* rotate back: MSR with FP cleared */
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr

#ifdef CONFIG_ALTIVEC

#if 0 /* this has no callers for now */
/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr
#endif /* 0 */

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */

_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC		/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	bnea	0x60
#endif
	b	99b

/* this can be in .text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * set paca id to -1 to say we got here
 * switch to real mode
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	li	r4,-1
	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
	bl	real_mode
	b	.kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid


/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work of switching stacks and dropping to real mode;
 * it also makes simple calls to other code
 */
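/*
 * Rough outline of what follows (a reading aid that mirrors the code):
 *   1. switch r1 onto the new stack and save the non-volatile registers
 *   2. stash the arguments in r25-r31 and disable interrupts
 *   3. kexec_copy_flush(image): copy the destination pages and flush them
 *   4. drop to real mode and copy the first 0x100 bytes of 'start' to 0
 *   5. set kexec_flag to release the cpus spinning in kexec_wait
 *   6. call the clear_all() hash/TLB teardown hook through its descriptor
 *   7. branch to the new kernel: start(phys_cpu, start, 0)
 */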

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-112(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-112-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1

	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	.kexec_copy_flush	/* (image) */

	/* turn off mmu */
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release the other cpus to the new kernel's secondary entry at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	/* clear out hardware hash page table and tlb */
	ld	r5,0(r27)		/* deref function descriptor */
	mtctr	r5
	bctrl				/* ppc_md.hpte_clear_all(void); */

/*
 *   kexec image calling is:
 *      the first 0x100 bytes of the entry point are copied to 0
 *
 *      all slaves branch to slave = 0x60 (absolute)
 *              slave(phys_cpu_id);
 *
 *      master goes to start = entry point
 *              start(phys_cpu_id, start, 0);
 *
 *
 *   a wrapper is needed to call existing kernels, here is an approximate
 *   description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */