xref: /openbmc/linux/arch/powerpc/kernel/head_64.S (revision 745a14cc)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/exception.h>
#include <asm/irqflags.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 *   SPRG Usage
 *
 *   Register	Definition
 *
 *   SPRG0	reserved for hypervisor
 *   SPRG1	temp - used to save gpr
 *   SPRG2	temp - used to save gpr
 *   SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl  _stext
_stext:
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our physical cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

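	/* Once the master cpu releases us (it sets the spinloop value to 1,
	 * typically from smp_release_cpus()), fall through and branch to the
	 * generic secondary startup code with our physical cpu id in r3.
	 */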
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
	mtctr	r4
	mr	r3,r24
	bctr
#else
	BUG_OPCODE
#endif

/* This value is used to mark exception frames on the stack. */
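/* (0x7265677368657265 is the ASCII string "regshere".) */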
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
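	/* On segment-table CPUs (CPU_FTR_SLB clear), check whether this is a
	 * segment-table miss (DSISR bit 0x00200000) on a kernel-region (0xc)
	 * address and, if so, go straight to the bolted STAB handler.
	 */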
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
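	/* Stash r3 and r9-r13 in the PACA's EXSLB save area and call the
	 * real-mode SLB allocator; SRR0 is left untouched so
	 * slb_miss_realmode can rfid straight back to the faulting code.
	 */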
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
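	/* Build the virtual address of system_call_common from the top 32
	 * bits of the paca pointer (which lives in the kernel linear
	 * mapping) plus the handler's low 32-bit offset, then rfid to it
	 * with the MMU enabled (MSR_IR|MSR_DR) and RI set.
	 */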
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRN_SPRG3
	mfspr	r11,SPRN_SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SPRN_SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
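/* (sc with r0 = 0x1ebe on CPUs with CPU_FTR_REAL_LE: just flip MSR_LE in
 * SRR1 and return, giving userspace a cheap endianness switch.)
 */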
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
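	/* Rotating left by 48 brings MSR_EE (0x8000) up to the top bit,
	 * where the rldicl mask clears it; rotating by a further 16 then
	 * restores the original bit positions.
	 */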
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG1
	rfid
	b	.

	.align	7
do_stab_bolted_pSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * We have some room here; we use it to put the pSeries SLB miss user
 * trampoline code, reasonably far away from slb_miss_user_common to
 * avoid problems with rfid.
 *
 * This is used when the SLB miss handler has to go virtual, which
 * doesn't happen at the moment but will again once we re-implement
 * dynamic VSIDs for shared page tables.
 */
#ifdef __DISABLED__
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align	7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
	.align	7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

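	/* Clear EE and RI before loading SRR0/SRR1: once they hold the
	 * return state, any interrupt taken here would clobber them, so
	 * the window must be marked unrecoverable (RI=0).
	 */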
	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page	 	/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
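	/* Extract the fault-reason bits from SRR1 (mask 0x5820 in the upper
	 * halfword) to build a DSISR-like error word in r4 for do_hash_page.
	 */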
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

/*
 * Here is the common SLB miss user handler that is used when going to
 * virtual mode for SLB misses. It is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	clrrdi	r10,r13,32
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	mfmsr	r10
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
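/* Called from FINISH_NAP when an exception interrupts a napping PPC970:
 * clear the nap flag in the thread_info local flags and replace the saved
 * NIP with the saved LR, so the interrupted idle loop returns via a blr.
 */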
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	b	.load_up_fpu

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .raw_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.raw_local_irq_restore
	b	11f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
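	/* The STAB is one 4kB page: 32 groups of 8 16-byte STEs, with the
	 * primary group selected by the low bits of the ESID.
	 */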
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now.		*/
99114cf11afSPaul Mackerras	/* At least for now, we use a very simple random castout scheme */
99214cf11afSPaul Mackerras	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
99314cf11afSPaul Mackerras	mftb	r11
99414cf11afSPaul Mackerras	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
99514cf11afSPaul Mackerras	ori	r11,r11,0x10
99614cf11afSPaul Mackerras
99714cf11afSPaul Mackerras	/* r10 currently points to an ste one past the group of interest */
99814cf11afSPaul Mackerras	/* make it point to the randomly selected entry			*/
99914cf11afSPaul Mackerras	subi	r10,r10,128
100014cf11afSPaul Mackerras	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
100114cf11afSPaul Mackerras
100214cf11afSPaul Mackerras	isync			/* mark the entry invalid		*/
100314cf11afSPaul Mackerras	ld	r11,0(r10)
100414cf11afSPaul Mackerras	rldicl	r11,r11,56,1	/* clear the valid bit */
100514cf11afSPaul Mackerras	rotldi	r11,r11,8
100614cf11afSPaul Mackerras	std	r11,0(r10)
100714cf11afSPaul Mackerras	sync
100814cf11afSPaul Mackerras
100914cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
101014cf11afSPaul Mackerras	slbie	r11
101114cf11afSPaul Mackerras
101214cf11afSPaul Mackerras2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
101314cf11afSPaul Mackerras	eieio
101414cf11afSPaul Mackerras
1015b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
101614cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
101714cf11afSPaul Mackerras	ori	r11,r11,0x90	/* Turn on valid and kp			*/
101814cf11afSPaul Mackerras	std	r11,0(r10)	/* Put new entry back into the stab	*/
101914cf11afSPaul Mackerras
102014cf11afSPaul Mackerras	sync
102114cf11afSPaul Mackerras
102214cf11afSPaul Mackerras	/* All done -- return from exception. */
102314cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
102414cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
102514cf11afSPaul Mackerras
102614cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI
102714cf11afSPaul Mackerras	beq-	unrecov_slb
102814cf11afSPaul Mackerras
102914cf11afSPaul Mackerras	mtcrf	0x80,r9			/* restore CR */
103014cf11afSPaul Mackerras
103114cf11afSPaul Mackerras	mfmsr	r10
103214cf11afSPaul Mackerras	clrrdi	r10,r10,2
103314cf11afSPaul Mackerras	mtmsrd	r10,1
103414cf11afSPaul Mackerras
1035b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r11
1036b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r12
103714cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
103814cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
103914cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
104014cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
104114cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
104214cf11afSPaul Mackerras	rfid
104314cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
104414cf11afSPaul Mackerras
104514cf11afSPaul Mackerras/*
104614cf11afSPaul Mackerras * Space for CPU0's segment table.
104714cf11afSPaul Mackerras *
104814cf11afSPaul Mackerras * On iSeries, the hypervisor must fill in at least one entry before
104916a15a30SStephen Rothwell * we get control (with relocate on).  The address is given to the hv
105016a15a30SStephen Rothwell * as a page number (see xLparMap below), so this must be at a
105114cf11afSPaul Mackerras * fixed address (the linker can't compute (u64)&initial_stab >>
105214cf11afSPaul Mackerras * PAGE_SHIFT).
105314cf11afSPaul Mackerras */
1054758438a7SMichael Ellerman	. = STAB0_OFFSET	/* 0x6000 */
105514cf11afSPaul Mackerras	.globl initial_stab
105614cf11afSPaul Mackerrasinitial_stab:
105714cf11afSPaul Mackerras	.space	4096
105814cf11afSPaul Mackerras
10599e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
106014cf11afSPaul Mackerras/*
106114cf11afSPaul Mackerras * Data area reserved for FWNMI option.
106214cf11afSPaul Mackerras * This address (0x7000) is fixed by the RPA.
106314cf11afSPaul Mackerras */
106414cf11afSPaul Mackerras	. = 0x7000
106514cf11afSPaul Mackerras	.globl fwnmi_data_area
106614cf11afSPaul Mackerrasfwnmi_data_area:
10679e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
106814cf11afSPaul Mackerras
106914cf11afSPaul Mackerras	/* iSeries does not use the FWNMI stuff, so it is safe to put
107014cf11afSPaul Mackerras	 * this here, even if we later allow kernels that will boot on
107114cf11afSPaul Mackerras	 * both pSeries and iSeries */
107214cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
107314cf11afSPaul Mackerras        . = LPARMAP_PHYS
107416a15a30SStephen Rothwell	.globl xLparMap
107516a15a30SStephen RothwellxLparMap:
107616a15a30SStephen Rothwell	.quad	HvEsidsToMap		/* xNumberEsids */
107716a15a30SStephen Rothwell	.quad	HvRangesToMap		/* xNumberRanges */
107816a15a30SStephen Rothwell	.quad	STAB0_PAGE		/* xSegmentTableOffs */
107916a15a30SStephen Rothwell	.zero	40			/* xRsvd */
108016a15a30SStephen Rothwell	/* xEsids (HvEsidsToMap entries of 2 quads) */
108116a15a30SStephen Rothwell	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
108216a15a30SStephen Rothwell	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
108316a15a30SStephen Rothwell	.quad	VMALLOC_START_ESID	/* xKernelEsid */
108416a15a30SStephen Rothwell	.quad	VMALLOC_START_VSID	/* xKernelVsid */
108516a15a30SStephen Rothwell	/* xRanges (HvRangesToMap entries of 3 quads) */
108616a15a30SStephen Rothwell	.quad	HvPagesToMap		/* xPages */
108716a15a30SStephen Rothwell	.quad	0			/* xOffset */
108816a15a30SStephen Rothwell	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
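/*
 * The quads above are expected to line up with the LparMap layout the
 * hypervisor reads (see <asm/iseries/lpar_map.h>): a 3-quad header, 40
 * reserved bytes, then HvEsidsToMap (ESID, VSID) pairs followed by
 * HvRangesToMap (pages, offset, VPN) triples.
 */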
108916a15a30SStephen Rothwell
109014cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
109114cf11afSPaul Mackerras
10929e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
109314cf11afSPaul Mackerras        . = 0x8000
10949e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
109514cf11afSPaul Mackerras
109614cf11afSPaul Mackerras/*
1097f39b7a55SOlof Johansson * On pSeries and most other platforms, secondary processors spin
1098f39b7a55SOlof Johansson * in the following code.
109914cf11afSPaul Mackerras * At entry, r3 = this processor's number (physical cpu id)
110014cf11afSPaul Mackerras */
1101f39b7a55SOlof Johansson_GLOBAL(generic_secondary_smp_init)
110214cf11afSPaul Mackerras	mr	r24,r3
110314cf11afSPaul Mackerras
110414cf11afSPaul Mackerras	/* turn on 64-bit mode */
110514cf11afSPaul Mackerras	bl	.enable_64b_mode
110614cf11afSPaul Mackerras
110714cf11afSPaul Mackerras	/* Set up a paca value for this processor. Since we have the
110814cf11afSPaul Mackerras	 * physical cpu id in r24, we need to search the pacas to find
110914cf11afSPaul Mackerras	 * which logical id maps to our physical one.
111014cf11afSPaul Mackerras	 */
1111e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r13, paca)	/* Get base vaddr of paca array	 */
111214cf11afSPaul Mackerras	li	r5,0			/* logical cpu id                */
111314cf11afSPaul Mackerras1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
111414cf11afSPaul Mackerras	cmpw	r6,r24			/* Compare to our id             */
111514cf11afSPaul Mackerras	beq	2f
111614cf11afSPaul Mackerras	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
111714cf11afSPaul Mackerras	addi	r5,r5,1
111814cf11afSPaul Mackerras	cmpwi	r5,NR_CPUS
111914cf11afSPaul Mackerras	blt	1b
112014cf11afSPaul Mackerras
112114cf11afSPaul Mackerras	mr	r3,r24			/* not found, copy phys to r3	 */
112214cf11afSPaul Mackerras	b	.kexec_wait		/* next kernel might do better	 */
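	/*
	 * The search above is roughly (illustrative sketch, not kernel C):
	 *	for (i = 0; i < NR_CPUS; i++)
	 *		if (paca[i].hw_cpu_id == phys_id)
	 *			goto found;		(label 2 below)
	 *	kexec_wait();
	 */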
112314cf11afSPaul Mackerras
1124b5bbeb23SPaul Mackerras2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
112514cf11afSPaul Mackerras	/* From now on, r24 is expected to be logical cpuid */
112614cf11afSPaul Mackerras	mr	r24,r5
112714cf11afSPaul Mackerras3:	HMT_LOW
112814cf11afSPaul Mackerras	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
112914cf11afSPaul Mackerras					/* start.			 */
113014cf11afSPaul Mackerras	sync
113114cf11afSPaul Mackerras
1132f39b7a55SOlof Johansson#ifndef CONFIG_SMP
1133f39b7a55SOlof Johansson	b	3b			/* Never go on non-SMP		 */
1134f39b7a55SOlof Johansson#else
1135f39b7a55SOlof Johansson	cmpwi	0,r23,0
1136f39b7a55SOlof Johansson	beq	3b			/* Loop until told to go	 */
1137f39b7a55SOlof Johansson
1138f39b7a55SOlof Johansson	/* See if we need to call a cpu state restore handler */
1139f39b7a55SOlof Johansson	LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
1140f39b7a55SOlof Johansson	ld	r23,0(r23)
1141f39b7a55SOlof Johansson	ld	r23,CPU_SPEC_RESTORE(r23)
1142f39b7a55SOlof Johansson	cmpdi	0,r23,0
1143f39b7a55SOlof Johansson	beq	4f
1144f39b7a55SOlof Johansson	ld	r23,0(r23)
1145f39b7a55SOlof Johansson	mtctr	r23
1146f39b7a55SOlof Johansson	bctrl
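	/*
	 * Roughly: if (cur_cpu_spec->cpu_restore) cur_cpu_spec->cpu_restore();
	 * CPU_SPEC_RESTORE is the asm-offset of that function pointer.
	 */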
1147f39b7a55SOlof Johansson
1148f39b7a55SOlof Johansson4:	/* Create a temp kernel stack for use before relocation is on.	*/
114914cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
115014cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
115114cf11afSPaul Mackerras
1152c705677eSStephen Rothwell	b	__secondary_start
115314cf11afSPaul Mackerras#endif
115414cf11afSPaul Mackerras
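/*
 * Turn the MMU off: clear MSR_IR and MSR_DR and rfid to the real-mode
 * continuation address passed in r4.  If translation is already off,
 * just return.  Clobbers r0 and r3.
 */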
115514cf11afSPaul Mackerras_STATIC(__mmu_off)
115614cf11afSPaul Mackerras	mfmsr	r3
115714cf11afSPaul Mackerras	andi.	r0,r3,MSR_IR|MSR_DR
115814cf11afSPaul Mackerras	beqlr
115914cf11afSPaul Mackerras	andc	r3,r3,r0
116014cf11afSPaul Mackerras	mtspr	SPRN_SRR0,r4
116114cf11afSPaul Mackerras	mtspr	SPRN_SRR1,r3
116214cf11afSPaul Mackerras	sync
116314cf11afSPaul Mackerras	rfid
116414cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
116514cf11afSPaul Mackerras
116614cf11afSPaul Mackerras
116714cf11afSPaul Mackerras/*
116814cf11afSPaul Mackerras * Here is our main kernel entry point. We currently support two kinds of
116914cf11afSPaul Mackerras * entry, depending on the value of r5.
117014cf11afSPaul Mackerras *
117114cf11afSPaul Mackerras *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
117214cf11afSPaul Mackerras *                 in r3...r7
117314cf11afSPaul Mackerras *
117414cf11afSPaul Mackerras *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
117514cf11afSPaul Mackerras *                 DT block, r4 is a physical pointer to the kernel itself
117614cf11afSPaul Mackerras *
117714cf11afSPaul Mackerras */
117814cf11afSPaul Mackerras_GLOBAL(__start_initialization_multiplatform)
117914cf11afSPaul Mackerras	/*
118014cf11afSPaul Mackerras	 * Are we booted from a PROM OF-type client interface?
118114cf11afSPaul Mackerras	 */
118214cf11afSPaul Mackerras	cmpldi	cr0,r5,0
1183939e60f6SStephen Rothwell	beq	1f
1184939e60f6SStephen Rothwell	b	.__boot_from_prom		/* yes -> prom */
1185939e60f6SStephen Rothwell1:
118614cf11afSPaul Mackerras	/* Save parameters */
118714cf11afSPaul Mackerras	mr	r31,r3
118814cf11afSPaul Mackerras	mr	r30,r4
118914cf11afSPaul Mackerras
119014cf11afSPaul Mackerras	/* Make sure we are running in 64 bits mode */
119114cf11afSPaul Mackerras	bl	.enable_64b_mode
119214cf11afSPaul Mackerras
119314cf11afSPaul Mackerras	/* Setup some critical 970 SPRs before switching MMU off */
1194f39b7a55SOlof Johansson	mfspr	r0,SPRN_PVR
1195f39b7a55SOlof Johansson	srwi	r0,r0,16
1196f39b7a55SOlof Johansson	cmpwi	r0,0x39		/* 970 */
1197f39b7a55SOlof Johansson	beq	1f
1198f39b7a55SOlof Johansson	cmpwi	r0,0x3c		/* 970FX */
1199f39b7a55SOlof Johansson	beq	1f
1200f39b7a55SOlof Johansson	cmpwi	r0,0x44		/* 970MP */
1201190a24f5SOlof Johansson	beq	1f
1202190a24f5SOlof Johansson	cmpwi	r0,0x45		/* 970GX */
1203f39b7a55SOlof Johansson	bne	2f
1204f39b7a55SOlof Johansson1:	bl	.__cpu_preinit_ppc970
1205f39b7a55SOlof Johansson2:
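	/*
	 * The PVR test above is roughly:
	 *	switch (mfspr(SPRN_PVR) >> 16) {
	 *	case 0x39: case 0x3c: case 0x44: case 0x45:
	 *		__cpu_preinit_ppc970();
	 *	}
	 */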
120614cf11afSPaul Mackerras
120714cf11afSPaul Mackerras	/* Switch off MMU if not already */
1208e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
120914cf11afSPaul Mackerras	add	r4,r4,r30
121014cf11afSPaul Mackerras	bl	.__mmu_off
121114cf11afSPaul Mackerras	b	.__after_prom_start
121214cf11afSPaul Mackerras
1213939e60f6SStephen Rothwell_INIT_STATIC(__boot_from_prom)
121414cf11afSPaul Mackerras	/* Save parameters */
121514cf11afSPaul Mackerras	mr	r31,r3
121614cf11afSPaul Mackerras	mr	r30,r4
121714cf11afSPaul Mackerras	mr	r29,r5
121814cf11afSPaul Mackerras	mr	r28,r6
121914cf11afSPaul Mackerras	mr	r27,r7
122014cf11afSPaul Mackerras
12216088857bSOlaf Hering	/*
12226088857bSOlaf Hering	 * Align the stack to a 16-byte boundary.
12236088857bSOlaf Hering	 * Depending on the size and layout of the ELF sections in the initial
12246088857bSOlaf Hering	 * boot binary, the stack pointer will be unaligned on PowerMac.
12256088857bSOlaf Hering	 */
1226c05b4770SLinus Torvalds	rldicr	r1,r1,0,59
1227c05b4770SLinus Torvalds
122814cf11afSPaul Mackerras	/* Make sure we are running in 64 bits mode */
122914cf11afSPaul Mackerras	bl	.enable_64b_mode
123014cf11afSPaul Mackerras
123114cf11afSPaul Mackerras	/* put a relocation offset into r3 */
123214cf11afSPaul Mackerras	bl	.reloc_offset
123314cf11afSPaul Mackerras
1234e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r2,__toc_start)
123514cf11afSPaul Mackerras	addi	r2,r2,0x4000
123614cf11afSPaul Mackerras	addi	r2,r2,0x4000
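	/*
	 * The TOC pointer conventionally points 0x8000 bytes past __toc_start
	 * so the whole table is reachable with 16-bit signed offsets; the bias
	 * is added as two 0x4000 addis because addi sign-extends its operand.
	 */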
123714cf11afSPaul Mackerras
123814cf11afSPaul Mackerras	/* Relocate the TOC from a virt addr to a real addr */
12395a408329SPaul Mackerras	add	r2,r2,r3
124014cf11afSPaul Mackerras
124114cf11afSPaul Mackerras	/* Restore parameters */
124214cf11afSPaul Mackerras	mr	r3,r31
124314cf11afSPaul Mackerras	mr	r4,r30
124414cf11afSPaul Mackerras	mr	r5,r29
124514cf11afSPaul Mackerras	mr	r6,r28
124614cf11afSPaul Mackerras	mr	r7,r27
124714cf11afSPaul Mackerras
124814cf11afSPaul Mackerras	/* Do all of the interaction with OF client interface */
124914cf11afSPaul Mackerras	bl	.prom_init
125014cf11afSPaul Mackerras	/* We never return */
125114cf11afSPaul Mackerras	trap
125214cf11afSPaul Mackerras
125314cf11afSPaul Mackerras_STATIC(__after_prom_start)
125414cf11afSPaul Mackerras
125514cf11afSPaul Mackerras/*
1256758438a7SMichael Ellerman * We need to run with __start at physical address PHYSICAL_START.
125714cf11afSPaul Mackerras * This will leave some code in the first 256B of
125814cf11afSPaul Mackerras * real memory, which are reserved for software use.
125914cf11afSPaul Mackerras * The remainder of the first page is loaded with the fixed
126014cf11afSPaul Mackerras * interrupt vectors.  The next two pages are filled with
126114cf11afSPaul Mackerras * unknown exception placeholders.
126214cf11afSPaul Mackerras *
126314cf11afSPaul Mackerras * Note: This process overwrites the OF exception vectors.
126414cf11afSPaul Mackerras *	r26 == relocation offset
126514cf11afSPaul Mackerras *	r27 == KERNELBASE
126614cf11afSPaul Mackerras */
126714cf11afSPaul Mackerras	bl	.reloc_offset
126814cf11afSPaul Mackerras	mr	r26,r3
1269e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r27, KERNELBASE)
127014cf11afSPaul Mackerras
1271e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)	/* target addr */
127214cf11afSPaul Mackerras
127314cf11afSPaul Mackerras	// XXX FIXME: Use phys returned by OF (r30)
12745a408329SPaul Mackerras	add	r4,r27,r26 		/* source addr			 */
127514cf11afSPaul Mackerras					/* current address of _start	 */
127614cf11afSPaul Mackerras					/*   i.e. where we are running	 */
127714cf11afSPaul Mackerras					/*	the source addr		 */
127814cf11afSPaul Mackerras
1279d0b79c54SJimi Xenidis	cmpdi	r4,0			/* In some cases the loader may  */
1280939e60f6SStephen Rothwell	bne	1f
1281939e60f6SStephen Rothwell	b	.start_here_multiplatform /* have already put us at zero */
1282d0b79c54SJimi Xenidis					/* so we can skip the copy.      */
1283939e60f6SStephen Rothwell1:	LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
128414cf11afSPaul Mackerras	sub	r5,r5,r27
128514cf11afSPaul Mackerras
128614cf11afSPaul Mackerras	li	r6,0x100		/* Start offset, the first 0x100 */
128714cf11afSPaul Mackerras					/* bytes were copied earlier.	 */
128814cf11afSPaul Mackerras
128914cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the first n bytes	 */
129014cf11afSPaul Mackerras					/* this includes the code being	 */
129114cf11afSPaul Mackerras					/* executed here.		 */
129214cf11afSPaul Mackerras
1293e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r0, 4f)	/* Jump to the copy of this code */
129414cf11afSPaul Mackerras	mtctr	r0			/* that we just made/relocated	 */
129514cf11afSPaul Mackerras	bctr
129614cf11afSPaul Mackerras
1297e58c3495SDavid Gibson4:	LOAD_REG_IMMEDIATE(r5,klimit)
12985a408329SPaul Mackerras	add	r5,r5,r26
129914cf11afSPaul Mackerras	ld	r5,0(r5)		/* get the value of klimit */
130014cf11afSPaul Mackerras	sub	r5,r5,r27
130114cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the rest */
130214cf11afSPaul Mackerras	b	.start_here_multiplatform
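/*
 * Summary of the copy sequence above (illustrative, not literal code):
 *	copy_and_flush(PHYSICAL_START, KERNELBASE + reloc,
 *		       copy_to_here - KERNELBASE, 0x100);
 *	branch into the copy just made (label 4);
 *	copy_and_flush(PHYSICAL_START, KERNELBASE + reloc,
 *		       klimit - KERNELBASE, offset reached above);
 * where klimit is read from its (relocated) variable.
 */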
130314cf11afSPaul Mackerras
130414cf11afSPaul Mackerras/*
130514cf11afSPaul Mackerras * Copy routine used to copy the kernel to start at physical address 0
130614cf11afSPaul Mackerras * and flush and invalidate the caches as needed.
130714cf11afSPaul Mackerras * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
130814cf11afSPaul Mackerras * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
130914cf11afSPaul Mackerras *
131014cf11afSPaul Mackerras * Note: this routine *only* clobbers r0, r6 and lr
131114cf11afSPaul Mackerras */
131214cf11afSPaul Mackerras_GLOBAL(copy_and_flush)
131314cf11afSPaul Mackerras	addi	r5,r5,-8
131414cf11afSPaul Mackerras	addi	r6,r6,-8
13155a2fe38dSOlof Johansson4:	li	r0,8			/* Use the smallest common	*/
131614cf11afSPaul Mackerras					/* denominator cache line	*/
131714cf11afSPaul Mackerras					/* size.  This results in	*/
131814cf11afSPaul Mackerras					/* extra cache line flushes	*/
131914cf11afSPaul Mackerras					/* but operation is correct.	*/
132014cf11afSPaul Mackerras					/* Can't get cache line size	*/
132114cf11afSPaul Mackerras					/* from NACA as it is being	*/
132214cf11afSPaul Mackerras					/* moved too.			*/
132314cf11afSPaul Mackerras
132414cf11afSPaul Mackerras	mtctr	r0			/* put # words/line in ctr	*/
132514cf11afSPaul Mackerras3:	addi	r6,r6,8			/* copy a cache line		*/
132614cf11afSPaul Mackerras	ldx	r0,r6,r4
132714cf11afSPaul Mackerras	stdx	r0,r6,r3
132814cf11afSPaul Mackerras	bdnz	3b
132914cf11afSPaul Mackerras	dcbst	r6,r3			/* write it to memory		*/
133014cf11afSPaul Mackerras	sync
133114cf11afSPaul Mackerras	icbi	r6,r3			/* flush the icache line	*/
133214cf11afSPaul Mackerras	cmpld	0,r6,r5
133314cf11afSPaul Mackerras	blt	4b
133414cf11afSPaul Mackerras	sync
133514cf11afSPaul Mackerras	addi	r5,r5,8
133614cf11afSPaul Mackerras	addi	r6,r6,8
133714cf11afSPaul Mackerras	blr
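/*
 * Roughly equivalent C for copy_and_flush (a sketch only; the per-chunk
 * dcbst/sync/icbi cache maintenance is the point of doing this by hand):
 *
 *	while (off < lim) {
 *		for (i = 0; i < 8; i++, off += 8)
 *			*(u64 *)(dest + off) = *(u64 *)(src + off);
 *		flush_dcache_and_inval_icache(dest + off);   (pseudo-helper)
 *	}
 */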
133814cf11afSPaul Mackerras
133914cf11afSPaul Mackerras.align 8
134014cf11afSPaul Mackerrascopy_to_here:
134114cf11afSPaul Mackerras
134214cf11afSPaul Mackerras#ifdef CONFIG_SMP
134314cf11afSPaul Mackerras#ifdef CONFIG_PPC_PMAC
134414cf11afSPaul Mackerras/*
134514cf11afSPaul Mackerras * On PowerMac, secondary processors start from the reset vector, which
134614cf11afSPaul Mackerras * is temporarily turned into a call to one of the functions below.
134714cf11afSPaul Mackerras */
134814cf11afSPaul Mackerras	.section ".text";
134914cf11afSPaul Mackerras	.align 2 ;
135014cf11afSPaul Mackerras
135135499c01SPaul Mackerras	.globl	__secondary_start_pmac_0
135235499c01SPaul Mackerras__secondary_start_pmac_0:
135335499c01SPaul Mackerras	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
135435499c01SPaul Mackerras	li	r24,0
135535499c01SPaul Mackerras	b	1f
135614cf11afSPaul Mackerras	li	r24,1
135735499c01SPaul Mackerras	b	1f
135814cf11afSPaul Mackerras	li	r24,2
135935499c01SPaul Mackerras	b	1f
136014cf11afSPaul Mackerras	li	r24,3
136135499c01SPaul Mackerras1:
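	/*
	 * The PowerMac kick-cpu code is expected to branch each secondary to
	 * __secondary_start_pmac_0 + 8*cpu, which is why the cpu 0-2 entries
	 * above must keep the fixed 8-byte stride.
	 */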
136214cf11afSPaul Mackerras
136314cf11afSPaul Mackerras_GLOBAL(pmac_secondary_start)
136414cf11afSPaul Mackerras	/* turn on 64-bit mode */
136514cf11afSPaul Mackerras	bl	.enable_64b_mode
136614cf11afSPaul Mackerras
136714cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
1368f39b7a55SOlof Johansson	bl	.__restore_cpu_ppc970
136914cf11afSPaul Mackerras
137014cf11afSPaul Mackerras	/* pSeries does that early, though I don't think we really need it */
137114cf11afSPaul Mackerras	mfmsr	r3
137214cf11afSPaul Mackerras	ori	r3,r3,MSR_RI
137314cf11afSPaul Mackerras	mtmsrd	r3			/* RI on */
137414cf11afSPaul Mackerras
137514cf11afSPaul Mackerras	/* Set up a paca value for this processor. */
1376e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
137714cf11afSPaul Mackerras	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
137814cf11afSPaul Mackerras	add	r13,r13,r4		/* for this processor.		*/
1379b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
138014cf11afSPaul Mackerras
138114cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
138214cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
138314cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
138414cf11afSPaul Mackerras
1385c705677eSStephen Rothwell	b	__secondary_start
138614cf11afSPaul Mackerras
138714cf11afSPaul Mackerras#endif /* CONFIG_PPC_PMAC */
138814cf11afSPaul Mackerras
138914cf11afSPaul Mackerras/*
139014cf11afSPaul Mackerras * This function is called after the master CPU has released the
139114cf11afSPaul Mackerras * secondary processors.  The execution environment is relocation off.
139214cf11afSPaul Mackerras * The paca for this processor has the following fields initialized at
139314cf11afSPaul Mackerras * this point:
139414cf11afSPaul Mackerras *   1. Processor number
139514cf11afSPaul Mackerras *   2. Segment table pointer (virtual address)
139614cf11afSPaul Mackerras * On entry the following are set:
139714cf11afSPaul Mackerras *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
139814cf11afSPaul Mackerras *   r24   = cpu# (in Linux terms)
139914cf11afSPaul Mackerras *   r13   = paca virtual address
140014cf11afSPaul Mackerras *   SPRG3 = paca virtual address
140114cf11afSPaul Mackerras */
1402fc68e869SStephen Rothwell	.globl	__secondary_start
1403c705677eSStephen Rothwell__secondary_start:
1404799d6046SPaul Mackerras	/* Set thread priority to MEDIUM */
1405799d6046SPaul Mackerras	HMT_MEDIUM
140614cf11afSPaul Mackerras
1407799d6046SPaul Mackerras	/* Load TOC */
140814cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
140914cf11afSPaul Mackerras
1410799d6046SPaul Mackerras	/* Do early setup for that CPU (stab, slb, hash table pointer) */
1411799d6046SPaul Mackerras	bl	.early_setup_secondary
141214cf11afSPaul Mackerras
141314cf11afSPaul Mackerras	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1414e58c3495SDavid Gibson	LOAD_REG_ADDR(r3, current_set)
141514cf11afSPaul Mackerras	sldi	r28,r24,3		/* get current_set[cpu#]	 */
141614cf11afSPaul Mackerras	ldx	r1,r3,r28
141714cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
141814cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
141914cf11afSPaul Mackerras
1420799d6046SPaul Mackerras	/* Clear backchain so we get nice backtraces */
142114cf11afSPaul Mackerras	li	r7,0
142214cf11afSPaul Mackerras	mtlr	r7
142314cf11afSPaul Mackerras
142414cf11afSPaul Mackerras	/* enable MMU and jump to start_secondary */
1425e58c3495SDavid Gibson	LOAD_REG_ADDR(r3, .start_secondary_prolog)
1426e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1427d04c56f7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
14283f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
142914cf11afSPaul Mackerras	ori	r4,r4,MSR_EE
1430ff3da2e0SBenjamin Herrenschmidt	li	r8,1
1431ff3da2e0SBenjamin Herrenschmidt	stb	r8,PACAHARDIRQEN(r13)
14323f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
143314cf11afSPaul Mackerras#endif
1434d04c56f7SPaul MackerrasBEGIN_FW_FTR_SECTION
1435d04c56f7SPaul Mackerras	stb	r7,PACAHARDIRQEN(r13)
1436d04c56f7SPaul MackerrasEND_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1437ff3da2e0SBenjamin Herrenschmidt	stb	r7,PACASOFTIRQEN(r13)
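	/*
	 * At this point interrupts are soft-disabled on all platforms; on
	 * iSeries they are hard-enabled (MSR_EE on, PACAHARDIRQEN = 1),
	 * elsewhere they remain hard-disabled (PACAHARDIRQEN = 0) for now.
	 */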
1438d04c56f7SPaul Mackerras
1439b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r3
1440b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r4
144114cf11afSPaul Mackerras	rfid
144214cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
144314cf11afSPaul Mackerras
144414cf11afSPaul Mackerras/*
144514cf11afSPaul Mackerras * Running with relocation on at this point.  All we want to do is
144614cf11afSPaul Mackerras * zero the stack back-chain pointer before going into C code.
144714cf11afSPaul Mackerras */
144814cf11afSPaul Mackerras_GLOBAL(start_secondary_prolog)
144914cf11afSPaul Mackerras	li	r3,0
145014cf11afSPaul Mackerras	std	r3,0(r1)		/* Zero the stack frame pointer	*/
145114cf11afSPaul Mackerras	bl	.start_secondary
1452799d6046SPaul Mackerras	b	.
145314cf11afSPaul Mackerras#endif
145414cf11afSPaul Mackerras
145514cf11afSPaul Mackerras/*
145614cf11afSPaul Mackerras * This subroutine clobbers r11 and r12
145714cf11afSPaul Mackerras */
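/*
 * enable_64b_mode turns on MSR_SF and MSR_ISF, i.e. 64-bit mode for
 * normal execution and for exception entry respectively.
 */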
145814cf11afSPaul Mackerras_GLOBAL(enable_64b_mode)
145914cf11afSPaul Mackerras	mfmsr	r11			/* grab the current MSR */
146014cf11afSPaul Mackerras	li	r12,1
146114cf11afSPaul Mackerras	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
146214cf11afSPaul Mackerras	or	r11,r11,r12
146314cf11afSPaul Mackerras	li	r12,1
146414cf11afSPaul Mackerras	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
146514cf11afSPaul Mackerras	or	r11,r11,r12
146614cf11afSPaul Mackerras	mtmsrd	r11
146714cf11afSPaul Mackerras	isync
146814cf11afSPaul Mackerras	blr
146914cf11afSPaul Mackerras
147014cf11afSPaul Mackerras/*
147114cf11afSPaul Mackerras * This is where the main kernel code starts.
147214cf11afSPaul Mackerras */
1473939e60f6SStephen Rothwell_INIT_STATIC(start_here_multiplatform)
147414cf11afSPaul Mackerras	/* get a new offset, now that the kernel has moved. */
147514cf11afSPaul Mackerras	bl	.reloc_offset
147614cf11afSPaul Mackerras	mr	r26,r3
147714cf11afSPaul Mackerras
147814cf11afSPaul Mackerras	/* Clear out the BSS. It may have been done in prom_init
147914cf11afSPaul Mackerras	 * already, but that's irrelevant since prom_init will soon
148014cf11afSPaul Mackerras	 * be detached from the kernel completely. Besides, we need
148114cf11afSPaul Mackerras	 * to clear it now for kexec-style entry.
148214cf11afSPaul Mackerras	 */
1483e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r11,__bss_stop)
1484e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r8,__bss_start)
148514cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
148614cf11afSPaul Mackerras	addi	r11,r11,7		/* round size up to a multiple of 8 */
148714cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
148814cf11afSPaul Mackerras	beq	4f
148914cf11afSPaul Mackerras	addi	r8,r8,-8
149014cf11afSPaul Mackerras	li	r0,0
149114cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
149214cf11afSPaul Mackerras3:	stdu	r0,8(r8)
149314cf11afSPaul Mackerras	bdnz	3b
149414cf11afSPaul Mackerras4:
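	/*
	 * The loop above is effectively memset(__bss_start, 0, size) with the
	 * size rounded up to a whole number of doublewords and cleared eight
	 * bytes per stdu.
	 */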
149514cf11afSPaul Mackerras
149614cf11afSPaul Mackerras	mfmsr	r6
149714cf11afSPaul Mackerras	ori	r6,r6,MSR_RI
149814cf11afSPaul Mackerras	mtmsrd	r6			/* RI on */
149914cf11afSPaul Mackerras
150014cf11afSPaul Mackerras	/* The following gets the stack and TOC set up with the regs */
150114cf11afSPaul Mackerras	/* pointing to the real addr of the kernel stack.  This is   */
150214cf11afSPaul Mackerras	/* all done to support the C function call below which sets  */
150314cf11afSPaul Mackerras	/* up the htab.  This is done because we have relocated the  */
150414cf11afSPaul Mackerras	/* kernel but are still running in real mode. */
150514cf11afSPaul Mackerras
1506e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3,init_thread_union)
15075a408329SPaul Mackerras	add	r3,r3,r26
150814cf11afSPaul Mackerras
150914cf11afSPaul Mackerras	/* set up a stack pointer (physical address) */
151014cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
151114cf11afSPaul Mackerras	li	r0,0
151214cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
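	/*
	 * stdu both allocates the initial stack frame and stores a zero
	 * back-chain word at its base, which terminates stack backtraces.
	 */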
151314cf11afSPaul Mackerras
151414cf11afSPaul Mackerras	/* set up the TOC (physical address) */
1515e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r2,__toc_start)
151614cf11afSPaul Mackerras	addi	r2,r2,0x4000
151714cf11afSPaul Mackerras	addi	r2,r2,0x4000
15185a408329SPaul Mackerras	add	r2,r2,r26
151914cf11afSPaul Mackerras
1520945feb17SBenjamin Herrenschmidt	/* Set initial ptr to current */
1521945feb17SBenjamin Herrenschmidt	LOAD_REG_IMMEDIATE(r4, init_task)
1522945feb17SBenjamin Herrenschmidt	std	r4,PACACURRENT(r13)
1523945feb17SBenjamin Herrenschmidt
152414cf11afSPaul Mackerras	/* Do very early kernel initializations, including initial hash table,
152514cf11afSPaul Mackerras	 * stab and slb setup before we turn on relocation.	*/
152614cf11afSPaul Mackerras
152714cf11afSPaul Mackerras	/* Restore parameters passed from prom_init/kexec */
152814cf11afSPaul Mackerras	mr	r3,r31
152914cf11afSPaul Mackerras 	bl	.early_setup
153014cf11afSPaul Mackerras
1531e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3, .start_here_common)
1532e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1533b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r3
1534b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r4
153514cf11afSPaul Mackerras	rfid
153614cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
153714cf11afSPaul Mackerras
153814cf11afSPaul Mackerras	/* This is where all platforms converge execution */
1539fc68e869SStephen Rothwell_INIT_GLOBAL(start_here_common)
154014cf11afSPaul Mackerras	/* relocation is on at this point */
154114cf11afSPaul Mackerras
154214cf11afSPaul Mackerras	/* The following code sets up the SP and TOC now that we are */
154314cf11afSPaul Mackerras	/* running with translation enabled. */
154414cf11afSPaul Mackerras
1545e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3,init_thread_union)
154614cf11afSPaul Mackerras
154714cf11afSPaul Mackerras	/* set up the stack */
154814cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
154914cf11afSPaul Mackerras	li	r0,0
155014cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
155114cf11afSPaul Mackerras
155214cf11afSPaul Mackerras	/* Load the TOC */
155314cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
155414cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
155514cf11afSPaul Mackerras
155614cf11afSPaul Mackerras	bl	.setup_system
155714cf11afSPaul Mackerras
155814cf11afSPaul Mackerras	/* Load up the kernel context */
155914cf11afSPaul Mackerras5:
156014cf11afSPaul Mackerras	li	r5,0
1561d04c56f7SPaul Mackerras	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
1562d04c56f7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1563d04c56f7SPaul MackerrasBEGIN_FW_FTR_SECTION
156414cf11afSPaul Mackerras	mfmsr	r5
1565ff3da2e0SBenjamin Herrenschmidt	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries*/
156614cf11afSPaul Mackerras	mtmsrd	r5
1567ff3da2e0SBenjamin Herrenschmidt	li	r5,1
15683f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
156914cf11afSPaul Mackerras#endif
1570ff3da2e0SBenjamin Herrenschmidt	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */
157114cf11afSPaul Mackerras
157214cf11afSPaul Mackerras	bl	.start_kernel
157314cf11afSPaul Mackerras
1574f1870f77SAnton Blanchard	/* Not reached */
1575f1870f77SAnton Blanchard	BUG_OPCODE
157614cf11afSPaul Mackerras
157714cf11afSPaul Mackerras/*
157814cf11afSPaul Mackerras * We put a few things here that have to be page-aligned.
157914cf11afSPaul Mackerras * This stuff goes at the beginning of the bss, which is page-aligned.
158014cf11afSPaul Mackerras */
158114cf11afSPaul Mackerras	.section ".bss"
158214cf11afSPaul Mackerras
158314cf11afSPaul Mackerras	.align	PAGE_SHIFT
158414cf11afSPaul Mackerras
158514cf11afSPaul Mackerras	.globl	empty_zero_page
158614cf11afSPaul Mackerrasempty_zero_page:
158714cf11afSPaul Mackerras	.space	PAGE_SIZE
158814cf11afSPaul Mackerras
158914cf11afSPaul Mackerras	.globl	swapper_pg_dir
159014cf11afSPaul Mackerrasswapper_pg_dir:
1591ee7a76daSStephen Rothwell	.space	PGD_TABLE_SIZE
1592