xref: /openbmc/linux/arch/powerpc/kernel/head_64.S (revision b6f6b98a)
114cf11afSPaul Mackerras/*
214cf11afSPaul Mackerras *  PowerPC version
314cf11afSPaul Mackerras *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
414cf11afSPaul Mackerras *
514cf11afSPaul Mackerras *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
614cf11afSPaul Mackerras *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
714cf11afSPaul Mackerras *  Adapted for Power Macintosh by Paul Mackerras.
814cf11afSPaul Mackerras *  Low-level exception handlers and MMU support
914cf11afSPaul Mackerras *  rewritten by Paul Mackerras.
1014cf11afSPaul Mackerras *    Copyright (C) 1996 Paul Mackerras.
1114cf11afSPaul Mackerras *
1214cf11afSPaul Mackerras *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
1314cf11afSPaul Mackerras *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
1414cf11afSPaul Mackerras *
1514cf11afSPaul Mackerras *  This file contains the low-level support and setup for the
1614cf11afSPaul Mackerras *  PowerPC-64 platform, including trap and interrupt dispatch.
1714cf11afSPaul Mackerras *
1814cf11afSPaul Mackerras *  This program is free software; you can redistribute it and/or
1914cf11afSPaul Mackerras *  modify it under the terms of the GNU General Public License
2014cf11afSPaul Mackerras *  as published by the Free Software Foundation; either version
2114cf11afSPaul Mackerras *  2 of the License, or (at your option) any later version.
2214cf11afSPaul Mackerras */
2314cf11afSPaul Mackerras
2414cf11afSPaul Mackerras#include <linux/threads.h>
25b5bbeb23SPaul Mackerras#include <asm/reg.h>
2614cf11afSPaul Mackerras#include <asm/page.h>
2714cf11afSPaul Mackerras#include <asm/mmu.h>
2814cf11afSPaul Mackerras#include <asm/ppc_asm.h>
2914cf11afSPaul Mackerras#include <asm/asm-offsets.h>
3014cf11afSPaul Mackerras#include <asm/bug.h>
3114cf11afSPaul Mackerras#include <asm/cputable.h>
3214cf11afSPaul Mackerras#include <asm/setup.h>
3314cf11afSPaul Mackerras#include <asm/hvcall.h>
34c43a55ffSKelly Daly#include <asm/iseries/lpar_map.h>
356cb7bfebSDavid Gibson#include <asm/thread_info.h>
363f639ee8SStephen Rothwell#include <asm/firmware.h>
3716a15a30SStephen Rothwell#include <asm/page_64.h>
38f9ff0f30SStephen Rothwell#include <asm/exception.h>
39945feb17SBenjamin Herrenschmidt#include <asm/irqflags.h>
4014cf11afSPaul Mackerras
4114cf11afSPaul Mackerras/*
4214cf11afSPaul Mackerras * We lay out physical memory as follows:
4314cf11afSPaul Mackerras * 0x0000 - 0x00ff : Secondary processor spin code
4414cf11afSPaul Mackerras * 0x0100 - 0x2fff : pSeries Interrupt prologs
4514cf11afSPaul Mackerras * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
4614cf11afSPaul Mackerras * 0x6000 - 0x6fff : Initial (CPU0) segment table
4714cf11afSPaul Mackerras * 0x7000 - 0x7fff : FWNMI data area
4814cf11afSPaul Mackerras * 0x8000 -        : Early init and support code
4914cf11afSPaul Mackerras */
5014cf11afSPaul Mackerras
5114cf11afSPaul Mackerras/*
5214cf11afSPaul Mackerras *   SPRG Usage
5314cf11afSPaul Mackerras *
5414cf11afSPaul Mackerras *   Register	Definition
5514cf11afSPaul Mackerras *
5614cf11afSPaul Mackerras *   SPRG0	reserved for hypervisor
5714cf11afSPaul Mackerras *   SPRG1	temp - used to save gpr
5814cf11afSPaul Mackerras *   SPRG2	temp - used to save gpr
5914cf11afSPaul Mackerras *   SPRG3	virt addr of paca
6014cf11afSPaul Mackerras */
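/*
 * How these are used below: the exception prologs first stash r13 in
 * SPRG1 (or SPRG2) so that r13 can be reloaded with the paca pointer
 * from SPRG3; the per-exception save areas in the paca (PACA_EXGEN,
 * PACA_EXMC, PACA_EXSLB) then provide scratch space before a kernel
 * stack is usable.
 */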
6114cf11afSPaul Mackerras
6214cf11afSPaul Mackerras/*
6314cf11afSPaul Mackerras * Entering into this code we make the following assumptions:
6414cf11afSPaul Mackerras *  For pSeries:
6514cf11afSPaul Mackerras *   1. The MMU is off & open firmware is running in real mode.
6614cf11afSPaul Mackerras *   2. The kernel is entered at __start
6714cf11afSPaul Mackerras *
6814cf11afSPaul Mackerras *  For iSeries:
6914cf11afSPaul Mackerras *   1. The MMU is on (as it always is for iSeries)
7014cf11afSPaul Mackerras *   2. The kernel is entered at system_reset_iSeries
7114cf11afSPaul Mackerras */
7214cf11afSPaul Mackerras
7314cf11afSPaul Mackerras	.text
7414cf11afSPaul Mackerras	.globl  _stext
7514cf11afSPaul Mackerras_stext:
7614cf11afSPaul Mackerras_GLOBAL(__start)
7714cf11afSPaul Mackerras	/* NOP this out unconditionally */
7814cf11afSPaul MackerrasBEGIN_FTR_SECTION
7914cf11afSPaul Mackerras	b	.__start_initialization_multiplatform
8014cf11afSPaul MackerrasEND_FTR_SECTION(0, 1)
8114cf11afSPaul Mackerras
8214cf11afSPaul Mackerras	/* Catch branch to 0 in real mode */
8314cf11afSPaul Mackerras	trap
8414cf11afSPaul Mackerras
8514cf11afSPaul Mackerras	/* Secondary processors spin on this value until it goes to 1. */
8614cf11afSPaul Mackerras	.globl  __secondary_hold_spinloop
8714cf11afSPaul Mackerras__secondary_hold_spinloop:
8814cf11afSPaul Mackerras	.llong	0x0
8914cf11afSPaul Mackerras
9014cf11afSPaul Mackerras	/* Secondary processors write this value with their cpu # */
9114cf11afSPaul Mackerras	/* after they enter the spin loop immediately below.	  */
9214cf11afSPaul Mackerras	.globl	__secondary_hold_acknowledge
9314cf11afSPaul Mackerras__secondary_hold_acknowledge:
9414cf11afSPaul Mackerras	.llong	0x0
9514cf11afSPaul Mackerras
961dce0e30SMichael Ellerman#ifdef CONFIG_PPC_ISERIES
971dce0e30SMichael Ellerman	/*
981dce0e30SMichael Ellerman	 * At offset 0x20, there is a pointer to iSeries LPAR data.
991dce0e30SMichael Ellerman	 * This is required by the hypervisor
1001dce0e30SMichael Ellerman	 */
1011dce0e30SMichael Ellerman	. = 0x20
1021dce0e30SMichael Ellerman	.llong hvReleaseData-KERNELBASE
1031dce0e30SMichael Ellerman#endif /* CONFIG_PPC_ISERIES */
1041dce0e30SMichael Ellerman
10514cf11afSPaul Mackerras	. = 0x60
10614cf11afSPaul Mackerras/*
10775423b7bSGeoff Levand * The following code is used to hold secondary processors
10875423b7bSGeoff Levand * in a spin loop after they have entered the kernel, but
10914cf11afSPaul Mackerras * before the bulk of the kernel has been relocated.  This code
11014cf11afSPaul Mackerras * is relocated to physical address 0x60 before prom_init is run.
11114cf11afSPaul Mackerras * All of it must fit below the first exception vector at 0x100.
11214cf11afSPaul Mackerras */
11314cf11afSPaul Mackerras_GLOBAL(__secondary_hold)
11414cf11afSPaul Mackerras	mfmsr	r24
11514cf11afSPaul Mackerras	ori	r24,r24,MSR_RI
11614cf11afSPaul Mackerras	mtmsrd	r24			/* RI on */
11714cf11afSPaul Mackerras
118f1870f77SAnton Blanchard	/* Grab our physical cpu number */
11914cf11afSPaul Mackerras	mr	r24,r3
12014cf11afSPaul Mackerras
12114cf11afSPaul Mackerras	/* Tell the master cpu we're here */
12214cf11afSPaul Mackerras	/* Relocation is off & we are located at an address less */
12314cf11afSPaul Mackerras	/* than 0x100, so only need to grab low order offset.    */
12414cf11afSPaul Mackerras	std	r24,__secondary_hold_acknowledge@l(0)
12514cf11afSPaul Mackerras	sync
12614cf11afSPaul Mackerras
12714cf11afSPaul Mackerras	/* All secondary cpus wait here until told to start. */
12814cf11afSPaul Mackerras100:	ld	r4,__secondary_hold_spinloop@l(0)
12914cf11afSPaul Mackerras	cmpdi	0,r4,1
13014cf11afSPaul Mackerras	bne	100b
13114cf11afSPaul Mackerras
132f1870f77SAnton Blanchard#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
133f39b7a55SOlof Johansson	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
134758438a7SMichael Ellerman	mtctr	r4
13514cf11afSPaul Mackerras	mr	r3,r24
136758438a7SMichael Ellerman	bctr
13714cf11afSPaul Mackerras#else
13814cf11afSPaul Mackerras	BUG_OPCODE
13914cf11afSPaul Mackerras#endif
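/*
 * To summarise the hold protocol above: each secondary arrives with its
 * physical cpu number in r3, publishes it in __secondary_hold_acknowledge,
 * then polls __secondary_hold_spinloop.  Once the boot cpu later sets that
 * word to 1, the secondary branches to generic_secondary_smp_init with its
 * cpu number back in r3.
 */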
14014cf11afSPaul Mackerras
14114cf11afSPaul Mackerras/* This value is used to mark exception frames on the stack. */
14214cf11afSPaul Mackerras	.section ".toc","aw"
14314cf11afSPaul Mackerrasexception_marker:
14414cf11afSPaul Mackerras	.tc	ID_72656773_68657265[TC],0x7265677368657265
14514cf11afSPaul Mackerras	.text
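/*
 * The TOC value above, 0x7265677368657265, is ASCII for "regshere"; the
 * exception prologs store it in the stack frame so that an exception
 * (pt_regs) frame can be recognised when walking the stack.
 */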
14614cf11afSPaul Mackerras
14714cf11afSPaul Mackerras/*
14814cf11afSPaul Mackerras * This is the start of the interrupt handlers for pSeries.
14914cf11afSPaul Mackerras * This code runs with relocation off.
15014cf11afSPaul Mackerras */
15114cf11afSPaul Mackerras	. = 0x100
15214cf11afSPaul Mackerras	.globl __start_interrupts
15314cf11afSPaul Mackerras__start_interrupts:
15414cf11afSPaul Mackerras
15514cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x100, system_reset)
15614cf11afSPaul Mackerras
15714cf11afSPaul Mackerras	. = 0x200
15814cf11afSPaul Mackerras_machine_check_pSeries:
15914cf11afSPaul Mackerras	HMT_MEDIUM
160b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13		/* save r13 */
16114cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
16214cf11afSPaul Mackerras
16314cf11afSPaul Mackerras	. = 0x300
16414cf11afSPaul Mackerras	.globl data_access_pSeries
16514cf11afSPaul Mackerrasdata_access_pSeries:
16614cf11afSPaul Mackerras	HMT_MEDIUM
167b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13
16814cf11afSPaul MackerrasBEGIN_FTR_SECTION
169b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG2,r12
170b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_DAR
171b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_DSISR
17214cf11afSPaul Mackerras	srdi	r13,r13,60
17314cf11afSPaul Mackerras	rlwimi	r13,r12,16,0x20
17414cf11afSPaul Mackerras	mfcr	r12
17514cf11afSPaul Mackerras	cmpwi	r13,0x2c
1763ccfc65cSPaul Mackerras	beq	do_stab_bolted_pSeries
17714cf11afSPaul Mackerras	mtcrf	0x80,r12
178b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SPRG2
17914cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
18014cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
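	/*
	 * The feature section above only runs on CPUs without an SLB
	 * (CPU_FTR_SLB clear), i.e. segment-table machines.  It checks for
	 * a segment-table miss (the DSISR bit inserted by the rlwimi) on a
	 * kernel 0xC... address (DAR >> 60 == 0xc, hence the compare with
	 * 0x2c) and, if so, bolts a kernel STE via do_stab_bolted entirely
	 * in real mode; everything else falls through to the normal data
	 * access prolog.
	 */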
18114cf11afSPaul Mackerras
18214cf11afSPaul Mackerras	. = 0x380
18314cf11afSPaul Mackerras	.globl data_access_slb_pSeries
18414cf11afSPaul Mackerrasdata_access_slb_pSeries:
18514cf11afSPaul Mackerras	HMT_MEDIUM
186b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13
187b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
1883c726f8dSBenjamin Herrenschmidt	std	r3,PACA_EXSLB+EX_R3(r13)
1893c726f8dSBenjamin Herrenschmidt	mfspr	r3,SPRN_DAR
19014cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
1913c726f8dSBenjamin Herrenschmidt	mfcr	r9
1923c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
1933c726f8dSBenjamin Herrenschmidt	/* Keep that around for when we re-implement dynamic VSIDs */
1943c726f8dSBenjamin Herrenschmidt	cmpdi	r3,0
1953c726f8dSBenjamin Herrenschmidt	bge	slb_miss_user_pseries
1963c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
19714cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_R10(r13)
19814cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_R11(r13)
19914cf11afSPaul Mackerras	std	r12,PACA_EXSLB+EX_R12(r13)
2003c726f8dSBenjamin Herrenschmidt	mfspr	r10,SPRN_SPRG1
2013c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXSLB+EX_R13(r13)
202b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SRR1		/* and SRR1 */
2033c726f8dSBenjamin Herrenschmidt	b	.slb_miss_realmode	/* Rel. branch works in real mode */
20414cf11afSPaul Mackerras
20514cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x400, instruction_access)
20614cf11afSPaul Mackerras
20714cf11afSPaul Mackerras	. = 0x480
20814cf11afSPaul Mackerras	.globl instruction_access_slb_pSeries
20914cf11afSPaul Mackerrasinstruction_access_slb_pSeries:
21014cf11afSPaul Mackerras	HMT_MEDIUM
211b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13
212b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
2133c726f8dSBenjamin Herrenschmidt	std	r3,PACA_EXSLB+EX_R3(r13)
2143c726f8dSBenjamin Herrenschmidt	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
21514cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
2163c726f8dSBenjamin Herrenschmidt	mfcr	r9
2173c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
2183c726f8dSBenjamin Herrenschmidt	/* Keep that around for when we re-implement dynamic VSIDs */
2193c726f8dSBenjamin Herrenschmidt	cmpdi	r3,0
2203c726f8dSBenjamin Herrenschmidt	bge	slb_miss_user_pseries
2213c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
22214cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_R10(r13)
22314cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_R11(r13)
22414cf11afSPaul Mackerras	std	r12,PACA_EXSLB+EX_R12(r13)
2253c726f8dSBenjamin Herrenschmidt	mfspr	r10,SPRN_SPRG1
2263c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXSLB+EX_R13(r13)
227b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SRR1		/* and SRR1 */
2283c726f8dSBenjamin Herrenschmidt	b	.slb_miss_realmode	/* Rel. branch works in real mode */
22914cf11afSPaul Mackerras
230d04c56f7SPaul Mackerras	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
23114cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x600, alignment)
23214cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x700, program_check)
23314cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
234d04c56f7SPaul Mackerras	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
23514cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
23614cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
23714cf11afSPaul Mackerras
23814cf11afSPaul Mackerras	. = 0xc00
23914cf11afSPaul Mackerras	.globl	system_call_pSeries
24014cf11afSPaul Mackerrassystem_call_pSeries:
24114cf11afSPaul Mackerras	HMT_MEDIUM
242745a14ccSPaul MackerrasBEGIN_FTR_SECTION
243745a14ccSPaul Mackerras	cmpdi	r0,0x1ebe
244745a14ccSPaul Mackerras	beq-	1f
245745a14ccSPaul MackerrasEND_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
24614cf11afSPaul Mackerras	mr	r9,r13
24714cf11afSPaul Mackerras	mfmsr	r10
248b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_SPRG3
249b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_SRR0
25014cf11afSPaul Mackerras	clrrdi	r12,r13,32
25114cf11afSPaul Mackerras	oris	r12,r12,system_call_common@h
25214cf11afSPaul Mackerras	ori	r12,r12,system_call_common@l
253b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r12
25414cf11afSPaul Mackerras	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
255b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SRR1
256b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r10
25714cf11afSPaul Mackerras	rfid
25814cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
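/*
 * The clrrdi/oris/ori sequence above builds the virtual address of
 * system_call_common by keeping the top 32 bits of the paca pointer
 * (the kernel's 0xc... region) and OR-ing in the handler's low 32 bits;
 * the rfid then turns relocation on (MSR_IR|MSR_DR) and jumps there.
 */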
25914cf11afSPaul Mackerras
260745a14ccSPaul Mackerras/* Fast LE/BE switch system call */
261745a14ccSPaul Mackerras1:	mfspr	r12,SPRN_SRR1
262745a14ccSPaul Mackerras	xori	r12,r12,MSR_LE
263745a14ccSPaul Mackerras	mtspr	SPRN_SRR1,r12
264745a14ccSPaul Mackerras	rfid		/* return to userspace */
265745a14ccSPaul Mackerras	b	.
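/*
 * The magic syscall number 0x1ebe, tested in the CPU_FTR_REAL_LE feature
 * section above, selects this path: it just flips MSR_LE in the saved
 * SRR1 and returns, switching the endianness of the calling process
 * without entering the normal system call path.
 */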
266745a14ccSPaul Mackerras
26714cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xd00, single_step)
26814cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
26914cf11afSPaul Mackerras
27014cf11afSPaul Mackerras	/* We need to deal with the Altivec unavailable exception
27114cf11afSPaul Mackerras	 * here, which is at 0xf20 and thus lands in the middle of the
27214cf11afSPaul Mackerras	 * prolog code of the performance monitor one.  A little
27314cf11afSPaul Mackerras	 * trickery is thus necessary.
27414cf11afSPaul Mackerras	 */
27514cf11afSPaul Mackerras	. = 0xf00
27614cf11afSPaul Mackerras	b	performance_monitor_pSeries
27714cf11afSPaul Mackerras
27810e34392SMichael Neuling	. = 0xf20
27910e34392SMichael Neuling	b	altivec_unavailable_pSeries
28014cf11afSPaul Mackerras
281ce48b210SMichael Neuling	. = 0xf40
282ce48b210SMichael Neuling	b	vsx_unavailable_pSeries
283ce48b210SMichael Neuling
284acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
285acf7d768SBenjamin Herrenschmidt	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
286acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
28714cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
288acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
289acf7d768SBenjamin Herrenschmidt	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
290acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
29114cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
292acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
293acf7d768SBenjamin Herrenschmidt	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
294acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
29514cf11afSPaul Mackerras
29614cf11afSPaul Mackerras	. = 0x3000
29714cf11afSPaul Mackerras
29814cf11afSPaul Mackerras/*** pSeries interrupt support ***/
29914cf11afSPaul Mackerras
30014cf11afSPaul Mackerras	/* moved from 0xf00 */
301449d846dSLivio Soares	STD_EXCEPTION_PSERIES(., performance_monitor)
30210e34392SMichael Neuling	STD_EXCEPTION_PSERIES(., altivec_unavailable)
303ce48b210SMichael Neuling	STD_EXCEPTION_PSERIES(., vsx_unavailable)
304d04c56f7SPaul Mackerras
305d04c56f7SPaul Mackerras/*
306d04c56f7SPaul Mackerras * An interrupt came in while soft-disabled; clear EE in SRR1,
307d04c56f7SPaul Mackerras * clear paca->hard_enabled and return.
308d04c56f7SPaul Mackerras */
309d04c56f7SPaul Mackerrasmasked_interrupt:
310d04c56f7SPaul Mackerras	stb	r10,PACAHARDIRQEN(r13)
311d04c56f7SPaul Mackerras	mtcrf	0x80,r9
312d04c56f7SPaul Mackerras	ld	r9,PACA_EXGEN+EX_R9(r13)
313d04c56f7SPaul Mackerras	mfspr	r10,SPRN_SRR1
314d04c56f7SPaul Mackerras	rldicl	r10,r10,48,1		/* clear MSR_EE */
315d04c56f7SPaul Mackerras	rotldi	r10,r10,16
316d04c56f7SPaul Mackerras	mtspr	SPRN_SRR1,r10
317d04c56f7SPaul Mackerras	ld	r10,PACA_EXGEN+EX_R10(r13)
318d04c56f7SPaul Mackerras	mfspr	r13,SPRN_SPRG1
319d04c56f7SPaul Mackerras	rfid
320d04c56f7SPaul Mackerras	b	.
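/*
 * In effect: r10 is zero here (the prolog branched to us because the
 * soft-enable flag it loaded into r10 was zero), so the stb clears
 * paca->hard_enabled; MSR_EE is cleared in the saved SRR1 and we return
 * without handling the interrupt.  Since the interrupt condition is
 * still pending, it is taken again as soon as the kernel hard-enables
 * interrupts.
 */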
32114cf11afSPaul Mackerras
32214cf11afSPaul Mackerras	.align	7
3233ccfc65cSPaul Mackerrasdo_stab_bolted_pSeries:
32414cf11afSPaul Mackerras	mtcrf	0x80,r12
325b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SPRG2
32614cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
32714cf11afSPaul Mackerras
32814cf11afSPaul Mackerras/*
3293c726f8dSBenjamin Herrenschmidt * We have some room here; we use it to put the pSeries SLB miss
3303c726f8dSBenjamin Herrenschmidt * user trampoline code, so that it is reasonably far away from
3313c726f8dSBenjamin Herrenschmidt * slb_miss_user_common to avoid problems with rfid.
3323c726f8dSBenjamin Herrenschmidt *
3333c726f8dSBenjamin Herrenschmidt * This is used when the SLB miss handler has to go virtual,
3343c726f8dSBenjamin Herrenschmidt * which does not happen for now but will once we re-implement
3353c726f8dSBenjamin Herrenschmidt * dynamic VSIDs for shared page tables.
3363c726f8dSBenjamin Herrenschmidt */
3373c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
3383c726f8dSBenjamin Herrenschmidtslb_miss_user_pseries:
3393c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXGEN+EX_R10(r13)
3403c726f8dSBenjamin Herrenschmidt	std	r11,PACA_EXGEN+EX_R11(r13)
3413c726f8dSBenjamin Herrenschmidt	std	r12,PACA_EXGEN+EX_R12(r13)
3423c726f8dSBenjamin Herrenschmidt	mfspr	r10,SPRG1
3433c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXSLB+EX_R9(r13)
3443c726f8dSBenjamin Herrenschmidt	ld	r12,PACA_EXSLB+EX_R3(r13)
3453c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXGEN+EX_R13(r13)
3463c726f8dSBenjamin Herrenschmidt	std	r11,PACA_EXGEN+EX_R9(r13)
3473c726f8dSBenjamin Herrenschmidt	std	r12,PACA_EXGEN+EX_R3(r13)
3483c726f8dSBenjamin Herrenschmidt	clrrdi	r12,r13,32
3493c726f8dSBenjamin Herrenschmidt	mfmsr	r10
3503c726f8dSBenjamin Herrenschmidt	mfspr	r11,SRR0			/* save SRR0 */
3513c726f8dSBenjamin Herrenschmidt	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
3523c726f8dSBenjamin Herrenschmidt	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
3533c726f8dSBenjamin Herrenschmidt	mtspr	SRR0,r12
3543c726f8dSBenjamin Herrenschmidt	mfspr	r12,SRR1			/* and SRR1 */
3553c726f8dSBenjamin Herrenschmidt	mtspr	SRR1,r10
3563c726f8dSBenjamin Herrenschmidt	rfid
3573c726f8dSBenjamin Herrenschmidt	b	.				/* prevent spec. execution */
3583c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
3593c726f8dSBenjamin Herrenschmidt
3609e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
3613c726f8dSBenjamin Herrenschmidt/*
36214cf11afSPaul Mackerras * Vectors for the FWNMI option.  Share common code.
36314cf11afSPaul Mackerras */
36414cf11afSPaul Mackerras	.globl system_reset_fwnmi
3658c4f1f29SMichael Ellerman	.align	7
36614cf11afSPaul Mackerrassystem_reset_fwnmi:
36714cf11afSPaul Mackerras	HMT_MEDIUM
368b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13		/* save r13 */
3699fc0a92cSOlaf Hering	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
37014cf11afSPaul Mackerras
37114cf11afSPaul Mackerras	.globl machine_check_fwnmi
3728c4f1f29SMichael Ellerman	.align	7
37314cf11afSPaul Mackerrasmachine_check_fwnmi:
37414cf11afSPaul Mackerras	HMT_MEDIUM
375b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13		/* save r13 */
3769fc0a92cSOlaf Hering	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
37714cf11afSPaul Mackerras
3789e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
3799e4859efSStephen Rothwell
38014cf11afSPaul Mackerras/*** Common interrupt handlers ***/
38114cf11afSPaul Mackerras
38214cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
38314cf11afSPaul Mackerras
38414cf11afSPaul Mackerras	/*
38514cf11afSPaul Mackerras	 * Machine check is different because we use a different
38614cf11afSPaul Mackerras	 * save area: PACA_EXMC instead of PACA_EXGEN.
38714cf11afSPaul Mackerras	 */
38814cf11afSPaul Mackerras	.align	7
38914cf11afSPaul Mackerras	.globl machine_check_common
39014cf11afSPaul Mackerrasmachine_check_common:
39114cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
392f39224a8SPaul Mackerras	FINISH_NAP
39314cf11afSPaul Mackerras	DISABLE_INTS
39414cf11afSPaul Mackerras	bl	.save_nvgprs
39514cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
39614cf11afSPaul Mackerras	bl	.machine_check_exception
39714cf11afSPaul Mackerras	b	.ret_from_except
39814cf11afSPaul Mackerras
39914cf11afSPaul Mackerras	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
40014cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
40114cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
40214cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
40314cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
404f39224a8SPaul Mackerras	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
40514cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
40614cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
40714cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
40814cf11afSPaul Mackerras#else
40914cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
41014cf11afSPaul Mackerras#endif
411acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
412acf7d768SBenjamin Herrenschmidt	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
413acf7d768SBenjamin Herrenschmidt	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
414acf7d768SBenjamin Herrenschmidt	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
415acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
41614cf11afSPaul Mackerras
41714cf11afSPaul Mackerras/*
41814cf11afSPaul Mackerras * Here we have detected that the kernel stack pointer is bad.
41914cf11afSPaul Mackerras * R9 contains the saved CR, r13 points to the paca,
42014cf11afSPaul Mackerras * r10 contains the (bad) kernel stack pointer,
42114cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
42214cf11afSPaul Mackerras * We switch to using an emergency stack, save the registers there,
42314cf11afSPaul Mackerras * and call kernel_bad_stack(), which panics.
42414cf11afSPaul Mackerras */
42514cf11afSPaul Mackerrasbad_stack:
42614cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
42714cf11afSPaul Mackerras	subi	r1,r1,64+INT_FRAME_SIZE
42814cf11afSPaul Mackerras	std	r9,_CCR(r1)
42914cf11afSPaul Mackerras	std	r10,GPR1(r1)
43014cf11afSPaul Mackerras	std	r11,_NIP(r1)
43114cf11afSPaul Mackerras	std	r12,_MSR(r1)
432b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR
433b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_DSISR
43414cf11afSPaul Mackerras	std	r11,_DAR(r1)
43514cf11afSPaul Mackerras	std	r12,_DSISR(r1)
43614cf11afSPaul Mackerras	mflr	r10
43714cf11afSPaul Mackerras	mfctr	r11
43814cf11afSPaul Mackerras	mfxer	r12
43914cf11afSPaul Mackerras	std	r10,_LINK(r1)
44014cf11afSPaul Mackerras	std	r11,_CTR(r1)
44114cf11afSPaul Mackerras	std	r12,_XER(r1)
44214cf11afSPaul Mackerras	SAVE_GPR(0,r1)
44314cf11afSPaul Mackerras	SAVE_GPR(2,r1)
44414cf11afSPaul Mackerras	SAVE_4GPRS(3,r1)
44514cf11afSPaul Mackerras	SAVE_2GPRS(7,r1)
44614cf11afSPaul Mackerras	SAVE_10GPRS(12,r1)
44714cf11afSPaul Mackerras	SAVE_10GPRS(22,r1)
44868730401SOlof Johansson	lhz	r12,PACA_TRAP_SAVE(r13)
44968730401SOlof Johansson	std	r12,_TRAP(r1)
45014cf11afSPaul Mackerras	addi	r11,r1,INT_FRAME_SIZE
45114cf11afSPaul Mackerras	std	r11,0(r1)
45214cf11afSPaul Mackerras	li	r12,0
45314cf11afSPaul Mackerras	std	r12,0(r11)
45414cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
45514cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
45614cf11afSPaul Mackerras	bl	.kernel_bad_stack
45714cf11afSPaul Mackerras	b	1b
45814cf11afSPaul Mackerras
45914cf11afSPaul Mackerras/*
46014cf11afSPaul Mackerras * Return from an exception with minimal checks.
46114cf11afSPaul Mackerras * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
46214cf11afSPaul Mackerras * If interrupts have been enabled, or anything has been
46314cf11afSPaul Mackerras * done that might have changed the scheduling status of
46414cf11afSPaul Mackerras * any task or sent any task a signal, you should use
46514cf11afSPaul Mackerras * ret_from_except or ret_from_except_lite instead of this.
46614cf11afSPaul Mackerras */
467b0a779deSPaul Mackerrasfast_exc_return_irq:			/* restores irq state too */
468b0a779deSPaul Mackerras	ld	r3,SOFTE(r1)
469945feb17SBenjamin Herrenschmidt	TRACE_AND_RESTORE_IRQ(r3);
470b0a779deSPaul Mackerras	ld	r12,_MSR(r1)
471b0a779deSPaul Mackerras	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
472b0a779deSPaul Mackerras	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
473b0a779deSPaul Mackerras	b	1f
474b0a779deSPaul Mackerras
47540ef8cbcSPaul Mackerras	.globl	fast_exception_return
47614cf11afSPaul Mackerrasfast_exception_return:
47714cf11afSPaul Mackerras	ld	r12,_MSR(r1)
478b0a779deSPaul Mackerras1:	ld	r11,_NIP(r1)
47914cf11afSPaul Mackerras	andi.	r3,r12,MSR_RI		/* check if RI is set */
48014cf11afSPaul Mackerras	beq-	unrecov_fer
481c6622f63SPaul Mackerras
482c6622f63SPaul Mackerras#ifdef CONFIG_VIRT_CPU_ACCOUNTING
483c6622f63SPaul Mackerras	andi.	r3,r12,MSR_PR
484c6622f63SPaul Mackerras	beq	2f
485c6622f63SPaul Mackerras	ACCOUNT_CPU_USER_EXIT(r3, r4)
486c6622f63SPaul Mackerras2:
487c6622f63SPaul Mackerras#endif
488c6622f63SPaul Mackerras
48914cf11afSPaul Mackerras	ld	r3,_CCR(r1)
49014cf11afSPaul Mackerras	ld	r4,_LINK(r1)
49114cf11afSPaul Mackerras	ld	r5,_CTR(r1)
49214cf11afSPaul Mackerras	ld	r6,_XER(r1)
49314cf11afSPaul Mackerras	mtcr	r3
49414cf11afSPaul Mackerras	mtlr	r4
49514cf11afSPaul Mackerras	mtctr	r5
49614cf11afSPaul Mackerras	mtxer	r6
49714cf11afSPaul Mackerras	REST_GPR(0, r1)
49814cf11afSPaul Mackerras	REST_8GPRS(2, r1)
49914cf11afSPaul Mackerras
50014cf11afSPaul Mackerras	mfmsr	r10
501d04c56f7SPaul Mackerras	rldicl	r10,r10,48,1		/* clear EE */
502d04c56f7SPaul Mackerras	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
50314cf11afSPaul Mackerras	mtmsrd	r10,1
50414cf11afSPaul Mackerras
505b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r12
506b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r11
50714cf11afSPaul Mackerras	REST_4GPRS(10, r1)
50814cf11afSPaul Mackerras	ld	r1,GPR1(r1)
50914cf11afSPaul Mackerras	rfid
51014cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
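/*
 * Note on the mtmsrd above: EE is cleared so that no external interrupt
 * can overwrite SRR0/SRR1 once they are loaded, and RI is cleared to
 * flag that any exception taken between here and the rfid would be
 * unrecoverable.
 */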
51114cf11afSPaul Mackerras
51214cf11afSPaul Mackerrasunrecov_fer:
51314cf11afSPaul Mackerras	bl	.save_nvgprs
51414cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
51514cf11afSPaul Mackerras	bl	.unrecoverable_exception
51614cf11afSPaul Mackerras	b	1b
51714cf11afSPaul Mackerras
51814cf11afSPaul Mackerras/*
51914cf11afSPaul Mackerras * Here r13 points to the paca, r9 contains the saved CR,
52014cf11afSPaul Mackerras * SRR0 and SRR1 are saved in r11 and r12,
52114cf11afSPaul Mackerras * r9 - r13 are saved in paca->exgen.
52214cf11afSPaul Mackerras */
52314cf11afSPaul Mackerras	.align	7
52414cf11afSPaul Mackerras	.globl data_access_common
52514cf11afSPaul Mackerrasdata_access_common:
526b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DAR
52714cf11afSPaul Mackerras	std	r10,PACA_EXGEN+EX_DAR(r13)
528b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DSISR
52914cf11afSPaul Mackerras	stw	r10,PACA_EXGEN+EX_DSISR(r13)
53014cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
53114cf11afSPaul Mackerras	ld	r3,PACA_EXGEN+EX_DAR(r13)
53214cf11afSPaul Mackerras	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
53314cf11afSPaul Mackerras	li	r5,0x300
53414cf11afSPaul Mackerras	b	.do_hash_page	 	/* Try to handle as hpte fault */
53514cf11afSPaul Mackerras
53614cf11afSPaul Mackerras	.align	7
53714cf11afSPaul Mackerras	.globl instruction_access_common
53814cf11afSPaul Mackerrasinstruction_access_common:
53914cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
54014cf11afSPaul Mackerras	ld	r3,_NIP(r1)
54114cf11afSPaul Mackerras	andis.	r4,r12,0x5820
54214cf11afSPaul Mackerras	li	r5,0x400
54314cf11afSPaul Mackerras	b	.do_hash_page		/* Try to handle as hpte fault */
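	/*
	 * An ISI has no DAR/DSISR: the faulting address is the saved SRR0
	 * (already in _NIP) and the fault status bits are in SRR1, which
	 * is what the andis. mask above extracts so that do_hash_page
	 * sees a DSISR-style value in r4.
	 */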
54414cf11afSPaul Mackerras
5453c726f8dSBenjamin Herrenschmidt/*
5463c726f8dSBenjamin Herrenschmidt * Here is the common SLB miss user handler that is used when going to
5473c726f8dSBenjamin Herrenschmidt * virtual mode for SLB misses; it is currently not used.
5483c726f8dSBenjamin Herrenschmidt */
5493c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
5503c726f8dSBenjamin Herrenschmidt	.align	7
5513c726f8dSBenjamin Herrenschmidt	.globl	slb_miss_user_common
5523c726f8dSBenjamin Herrenschmidtslb_miss_user_common:
5533c726f8dSBenjamin Herrenschmidt	mflr	r10
5543c726f8dSBenjamin Herrenschmidt	std	r3,PACA_EXGEN+EX_DAR(r13)
5553c726f8dSBenjamin Herrenschmidt	stw	r9,PACA_EXGEN+EX_CCR(r13)
5563c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXGEN+EX_LR(r13)
5573c726f8dSBenjamin Herrenschmidt	std	r11,PACA_EXGEN+EX_SRR0(r13)
5583c726f8dSBenjamin Herrenschmidt	bl	.slb_allocate_user
5593c726f8dSBenjamin Herrenschmidt
5603c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXGEN+EX_LR(r13)
5613c726f8dSBenjamin Herrenschmidt	ld	r3,PACA_EXGEN+EX_R3(r13)
5623c726f8dSBenjamin Herrenschmidt	lwz	r9,PACA_EXGEN+EX_CCR(r13)
5633c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXGEN+EX_SRR0(r13)
5643c726f8dSBenjamin Herrenschmidt	mtlr	r10
5653c726f8dSBenjamin Herrenschmidt	beq-	slb_miss_fault
5663c726f8dSBenjamin Herrenschmidt
5673c726f8dSBenjamin Herrenschmidt	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
5683c726f8dSBenjamin Herrenschmidt	beq-	unrecov_user_slb
5693c726f8dSBenjamin Herrenschmidt	mfmsr	r10
5703c726f8dSBenjamin Herrenschmidt
5713c726f8dSBenjamin Herrenschmidt.machine push
5723c726f8dSBenjamin Herrenschmidt.machine "power4"
5733c726f8dSBenjamin Herrenschmidt	mtcrf	0x80,r9
5743c726f8dSBenjamin Herrenschmidt.machine pop
5753c726f8dSBenjamin Herrenschmidt
5763c726f8dSBenjamin Herrenschmidt	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
5773c726f8dSBenjamin Herrenschmidt	mtmsrd	r10,1
5783c726f8dSBenjamin Herrenschmidt
5793c726f8dSBenjamin Herrenschmidt	mtspr	SRR0,r11
5803c726f8dSBenjamin Herrenschmidt	mtspr	SRR1,r12
5813c726f8dSBenjamin Herrenschmidt
5823c726f8dSBenjamin Herrenschmidt	ld	r9,PACA_EXGEN+EX_R9(r13)
5833c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXGEN+EX_R10(r13)
5843c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXGEN+EX_R11(r13)
5853c726f8dSBenjamin Herrenschmidt	ld	r12,PACA_EXGEN+EX_R12(r13)
5863c726f8dSBenjamin Herrenschmidt	ld	r13,PACA_EXGEN+EX_R13(r13)
5873c726f8dSBenjamin Herrenschmidt	rfid
5883c726f8dSBenjamin Herrenschmidt	b	.
5893c726f8dSBenjamin Herrenschmidt
5903c726f8dSBenjamin Herrenschmidtslb_miss_fault:
5913c726f8dSBenjamin Herrenschmidt	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
5923c726f8dSBenjamin Herrenschmidt	ld	r4,PACA_EXGEN+EX_DAR(r13)
5933c726f8dSBenjamin Herrenschmidt	li	r5,0
5943c726f8dSBenjamin Herrenschmidt	std	r4,_DAR(r1)
5953c726f8dSBenjamin Herrenschmidt	std	r5,_DSISR(r1)
5963ccfc65cSPaul Mackerras	b	handle_page_fault
5973c726f8dSBenjamin Herrenschmidt
5983c726f8dSBenjamin Herrenschmidtunrecov_user_slb:
5993c726f8dSBenjamin Herrenschmidt	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
6003c726f8dSBenjamin Herrenschmidt	DISABLE_INTS
6013c726f8dSBenjamin Herrenschmidt	bl	.save_nvgprs
6023c726f8dSBenjamin Herrenschmidt1:	addi	r3,r1,STACK_FRAME_OVERHEAD
6033c726f8dSBenjamin Herrenschmidt	bl	.unrecoverable_exception
6043c726f8dSBenjamin Herrenschmidt	b	1b
6053c726f8dSBenjamin Herrenschmidt
6063c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
6073c726f8dSBenjamin Herrenschmidt
6083c726f8dSBenjamin Herrenschmidt
6093c726f8dSBenjamin Herrenschmidt/*
6103c726f8dSBenjamin Herrenschmidt * r13 points to the PACA, r9 contains the saved CR,
6113c726f8dSBenjamin Herrenschmidt * r12 contains the saved SRR1, SRR0 is still ready for return,
6123c726f8dSBenjamin Herrenschmidt * r3 has the faulting address,
6133c726f8dSBenjamin Herrenschmidt * r9 - r13 are saved in paca->exslb,
6143c726f8dSBenjamin Herrenschmidt * r3 is saved in paca->exslb as well (in the EX_R3 slot).
6153c726f8dSBenjamin Herrenschmidt * We assume we aren't going to take any exceptions during this procedure.
6163c726f8dSBenjamin Herrenschmidt */
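/*
 * This handler runs in real mode without a stack: taking another SLB or
 * hash fault here would be fatal, so the working registers live in the
 * paca->exslb save area, LR is saved there too, and the handler returns
 * directly with rfid.
 */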
6173c726f8dSBenjamin Herrenschmidt_GLOBAL(slb_miss_realmode)
6183c726f8dSBenjamin Herrenschmidt	mflr	r10
6193c726f8dSBenjamin Herrenschmidt
6203c726f8dSBenjamin Herrenschmidt	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
6213c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
6223c726f8dSBenjamin Herrenschmidt
6233c726f8dSBenjamin Herrenschmidt	bl	.slb_allocate_realmode
6243c726f8dSBenjamin Herrenschmidt
6253c726f8dSBenjamin Herrenschmidt	/* All done -- return from exception. */
6263c726f8dSBenjamin Herrenschmidt
6273c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXSLB+EX_LR(r13)
6283c726f8dSBenjamin Herrenschmidt	ld	r3,PACA_EXSLB+EX_R3(r13)
6293c726f8dSBenjamin Herrenschmidt	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
6303c726f8dSBenjamin Herrenschmidt#ifdef CONFIG_PPC_ISERIES
6313f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
6323356bb9fSDavid Gibson	ld	r11,PACALPPACAPTR(r13)
6333356bb9fSDavid Gibson	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
6343f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
6353c726f8dSBenjamin Herrenschmidt#endif /* CONFIG_PPC_ISERIES */
6363c726f8dSBenjamin Herrenschmidt
6373c726f8dSBenjamin Herrenschmidt	mtlr	r10
6383c726f8dSBenjamin Herrenschmidt
6393c726f8dSBenjamin Herrenschmidt	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
640320787c7SPaul Mackerras	beq-	2f
6413c726f8dSBenjamin Herrenschmidt
6423c726f8dSBenjamin Herrenschmidt.machine	push
6433c726f8dSBenjamin Herrenschmidt.machine	"power4"
6443c726f8dSBenjamin Herrenschmidt	mtcrf	0x80,r9
6453c726f8dSBenjamin Herrenschmidt	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
6463c726f8dSBenjamin Herrenschmidt.machine	pop
6473c726f8dSBenjamin Herrenschmidt
6483c726f8dSBenjamin Herrenschmidt#ifdef CONFIG_PPC_ISERIES
6493f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
6503c726f8dSBenjamin Herrenschmidt	mtspr	SPRN_SRR0,r11
6513c726f8dSBenjamin Herrenschmidt	mtspr	SPRN_SRR1,r12
6523f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
6533c726f8dSBenjamin Herrenschmidt#endif /* CONFIG_PPC_ISERIES */
6543c726f8dSBenjamin Herrenschmidt	ld	r9,PACA_EXSLB+EX_R9(r13)
6553c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXSLB+EX_R10(r13)
6563c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXSLB+EX_R11(r13)
6573c726f8dSBenjamin Herrenschmidt	ld	r12,PACA_EXSLB+EX_R12(r13)
6583c726f8dSBenjamin Herrenschmidt	ld	r13,PACA_EXSLB+EX_R13(r13)
6593c726f8dSBenjamin Herrenschmidt	rfid
6603c726f8dSBenjamin Herrenschmidt	b	.	/* prevent speculative execution */
6613c726f8dSBenjamin Herrenschmidt
662320787c7SPaul Mackerras2:
663320787c7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
664320787c7SPaul MackerrasBEGIN_FW_FTR_SECTION
665320787c7SPaul Mackerras	b	unrecov_slb
666320787c7SPaul MackerrasEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
667320787c7SPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
668320787c7SPaul Mackerras	mfspr	r11,SPRN_SRR0
669320787c7SPaul Mackerras	clrrdi	r10,r13,32
670320787c7SPaul Mackerras	LOAD_HANDLER(r10,unrecov_slb)
671320787c7SPaul Mackerras	mtspr	SPRN_SRR0,r10
672320787c7SPaul Mackerras	mfmsr	r10
673320787c7SPaul Mackerras	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
674320787c7SPaul Mackerras	mtspr	SPRN_SRR1,r10
675320787c7SPaul Mackerras	rfid
676320787c7SPaul Mackerras	b	.
677320787c7SPaul Mackerras
6783c726f8dSBenjamin Herrenschmidtunrecov_slb:
6793c726f8dSBenjamin Herrenschmidt	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
6803c726f8dSBenjamin Herrenschmidt	DISABLE_INTS
6813c726f8dSBenjamin Herrenschmidt	bl	.save_nvgprs
6823c726f8dSBenjamin Herrenschmidt1:	addi	r3,r1,STACK_FRAME_OVERHEAD
6833c726f8dSBenjamin Herrenschmidt	bl	.unrecoverable_exception
6843c726f8dSBenjamin Herrenschmidt	b	1b
6853c726f8dSBenjamin Herrenschmidt
68614cf11afSPaul Mackerras	.align	7
68714cf11afSPaul Mackerras	.globl hardware_interrupt_common
68814cf11afSPaul Mackerras	.globl hardware_interrupt_entry
68914cf11afSPaul Mackerrashardware_interrupt_common:
69014cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
691f39224a8SPaul Mackerras	FINISH_NAP
69214cf11afSPaul Mackerrashardware_interrupt_entry:
69314cf11afSPaul Mackerras	DISABLE_INTS
694a416561bSOlof JohanssonBEGIN_FTR_SECTION
695cb2c9b27SAnton Blanchard	bl	.ppc64_runlatch_on
696a416561bSOlof JohanssonEND_FTR_SECTION_IFSET(CPU_FTR_CTRL)
69714cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
69814cf11afSPaul Mackerras	bl	.do_IRQ
69914cf11afSPaul Mackerras	b	.ret_from_except_lite
70014cf11afSPaul Mackerras
701f39224a8SPaul Mackerras#ifdef CONFIG_PPC_970_NAP
702f39224a8SPaul Mackerraspower4_fixup_nap:
703f39224a8SPaul Mackerras	andc	r9,r9,r10
704f39224a8SPaul Mackerras	std	r9,TI_LOCAL_FLAGS(r11)
705f39224a8SPaul Mackerras	ld	r10,_LINK(r1)		/* make idle task do the */
706f39224a8SPaul Mackerras	std	r10,_NIP(r1)		/* equivalent of a blr */
707f39224a8SPaul Mackerras	blr
708f39224a8SPaul Mackerras#endif
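/*
 * power4_fixup_nap is reached via FINISH_NAP in the handlers above when
 * an exception interrupts a CPU napping in the idle loop: it clears the
 * "napping" thread-local flag and replaces the saved NIP with the saved
 * LR, so that the exception return drops the idle task out of its nap
 * as if the nap had simply returned.
 */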
709f39224a8SPaul Mackerras
71014cf11afSPaul Mackerras	.align	7
71114cf11afSPaul Mackerras	.globl alignment_common
71214cf11afSPaul Mackerrasalignment_common:
713b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DAR
71414cf11afSPaul Mackerras	std	r10,PACA_EXGEN+EX_DAR(r13)
715b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DSISR
71614cf11afSPaul Mackerras	stw	r10,PACA_EXGEN+EX_DSISR(r13)
71714cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
71814cf11afSPaul Mackerras	ld	r3,PACA_EXGEN+EX_DAR(r13)
71914cf11afSPaul Mackerras	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
72014cf11afSPaul Mackerras	std	r3,_DAR(r1)
72114cf11afSPaul Mackerras	std	r4,_DSISR(r1)
72214cf11afSPaul Mackerras	bl	.save_nvgprs
72314cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
72414cf11afSPaul Mackerras	ENABLE_INTS
72514cf11afSPaul Mackerras	bl	.alignment_exception
72614cf11afSPaul Mackerras	b	.ret_from_except
72714cf11afSPaul Mackerras
72814cf11afSPaul Mackerras	.align	7
72914cf11afSPaul Mackerras	.globl program_check_common
73014cf11afSPaul Mackerrasprogram_check_common:
73114cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
73214cf11afSPaul Mackerras	bl	.save_nvgprs
73314cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
73414cf11afSPaul Mackerras	ENABLE_INTS
73514cf11afSPaul Mackerras	bl	.program_check_exception
73614cf11afSPaul Mackerras	b	.ret_from_except
73714cf11afSPaul Mackerras
73814cf11afSPaul Mackerras	.align	7
73914cf11afSPaul Mackerras	.globl fp_unavailable_common
74014cf11afSPaul Mackerrasfp_unavailable_common:
74114cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
7423ccfc65cSPaul Mackerras	bne	1f			/* if from user, just load it up */
74314cf11afSPaul Mackerras	bl	.save_nvgprs
74414cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
74514cf11afSPaul Mackerras	ENABLE_INTS
74614cf11afSPaul Mackerras	bl	.kernel_fp_unavailable_exception
74714cf11afSPaul Mackerras	BUG_OPCODE
7486f3d8e69SMichael Neuling1:	bl	.load_up_fpu
7496f3d8e69SMichael Neuling	b	fast_exception_return
75014cf11afSPaul Mackerras
75114cf11afSPaul Mackerras	.align	7
75214cf11afSPaul Mackerras	.globl altivec_unavailable_common
75314cf11afSPaul Mackerrasaltivec_unavailable_common:
75414cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
75514cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
75614cf11afSPaul MackerrasBEGIN_FTR_SECTION
7576f3d8e69SMichael Neuling	beq	1f
7586f3d8e69SMichael Neuling	bl	.load_up_altivec
7596f3d8e69SMichael Neuling	b	fast_exception_return
7606f3d8e69SMichael Neuling1:
76114cf11afSPaul MackerrasEND_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
76214cf11afSPaul Mackerras#endif
76314cf11afSPaul Mackerras	bl	.save_nvgprs
76414cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
76514cf11afSPaul Mackerras	ENABLE_INTS
76614cf11afSPaul Mackerras	bl	.altivec_unavailable_exception
76714cf11afSPaul Mackerras	b	.ret_from_except
76814cf11afSPaul Mackerras
76914cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
77014cf11afSPaul Mackerras/*
77114cf11afSPaul Mackerras * load_up_altivec(unused, unused, tsk)
77214cf11afSPaul Mackerras * Disable VMX for the task which had it previously,
77314cf11afSPaul Mackerras * and save its vector registers in its thread_struct.
77414cf11afSPaul Mackerras * Enables the VMX for use in the kernel on return.
77514cf11afSPaul Mackerras * On SMP we know the VMX is free, since we give it up every
77614cf11afSPaul Mackerras * switch (ie, no lazy save of the vector registers).
77714cf11afSPaul Mackerras * On entry: r13 == 'current' && last_task_used_altivec != 'current'
77814cf11afSPaul Mackerras */
77914cf11afSPaul Mackerras_STATIC(load_up_altivec)
78014cf11afSPaul Mackerras	mfmsr	r5			/* grab the current MSR */
78114cf11afSPaul Mackerras	oris	r5,r5,MSR_VEC@h
78214cf11afSPaul Mackerras	mtmsrd	r5			/* enable use of VMX now */
78314cf11afSPaul Mackerras	isync
78414cf11afSPaul Mackerras
78514cf11afSPaul Mackerras/*
78614cf11afSPaul Mackerras * For SMP, we don't do lazy VMX switching because it just gets too
78714cf11afSPaul Mackerras * horrendously complex, especially when a task switches from one CPU
78814cf11afSPaul Mackerras * to another.  Instead we call giveup_altivec in switch_to.
78914cf11afSPaul Mackerras * VRSAVE isn't dealt with here, that is done in the normal context
79014cf11afSPaul Mackerras * switch code. Note that we could rely on vrsave value to eventually
79114cf11afSPaul Mackerras * avoid saving all of the VREGs here...
79214cf11afSPaul Mackerras */
79314cf11afSPaul Mackerras#ifndef CONFIG_SMP
79414cf11afSPaul Mackerras	ld	r3,last_task_used_altivec@got(r2)
79514cf11afSPaul Mackerras	ld	r4,0(r3)
79614cf11afSPaul Mackerras	cmpdi	0,r4,0
79714cf11afSPaul Mackerras	beq	1f
79814cf11afSPaul Mackerras	/* Save VMX state to last_task_used_altivec's THREAD struct */
79914cf11afSPaul Mackerras	addi	r4,r4,THREAD
80014cf11afSPaul Mackerras	SAVE_32VRS(0,r5,r4)
80114cf11afSPaul Mackerras	mfvscr	vr0
80214cf11afSPaul Mackerras	li	r10,THREAD_VSCR
80314cf11afSPaul Mackerras	stvx	vr0,r10,r4
80414cf11afSPaul Mackerras	/* Disable VMX for last_task_used_altivec */
80514cf11afSPaul Mackerras	ld	r5,PT_REGS(r4)
80614cf11afSPaul Mackerras	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
80714cf11afSPaul Mackerras	lis	r6,MSR_VEC@h
80814cf11afSPaul Mackerras	andc	r4,r4,r6
80914cf11afSPaul Mackerras	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
81014cf11afSPaul Mackerras1:
81114cf11afSPaul Mackerras#endif /* CONFIG_SMP */
81214cf11afSPaul Mackerras	/* Hack: if we get an altivec unavailable trap with VRSAVE
81314cf11afSPaul Mackerras	 * set to all zeros, we assume this is a broken application
81414cf11afSPaul Mackerras	 * that fails to set it properly, and thus we switch it to
81514cf11afSPaul Mackerras	 * all 1's
81614cf11afSPaul Mackerras	 */
81714cf11afSPaul Mackerras	mfspr	r4,SPRN_VRSAVE
81814cf11afSPaul Mackerras	cmpdi	0,r4,0
81914cf11afSPaul Mackerras	bne+	1f
82014cf11afSPaul Mackerras	li	r4,-1
82114cf11afSPaul Mackerras	mtspr	SPRN_VRSAVE,r4
82214cf11afSPaul Mackerras1:
82314cf11afSPaul Mackerras	/* enable use of VMX after return */
82414cf11afSPaul Mackerras	ld	r4,PACACURRENT(r13)
82514cf11afSPaul Mackerras	addi	r5,r4,THREAD		/* Get THREAD */
82614cf11afSPaul Mackerras	oris	r12,r12,MSR_VEC@h
82714cf11afSPaul Mackerras	std	r12,_MSR(r1)
82814cf11afSPaul Mackerras	li	r4,1
82914cf11afSPaul Mackerras	li	r10,THREAD_VSCR
83014cf11afSPaul Mackerras	stw	r4,THREAD_USED_VR(r5)
83114cf11afSPaul Mackerras	lvx	vr0,r10,r5
83214cf11afSPaul Mackerras	mtvscr	vr0
83314cf11afSPaul Mackerras	REST_32VRS(0,r4,r5)
83414cf11afSPaul Mackerras#ifndef CONFIG_SMP
83514cf11afSPaul Mackerras	/* Update last_task_used_altivec to 'current' */
83614cf11afSPaul Mackerras	subi	r4,r5,THREAD		/* Back to 'current' */
83714cf11afSPaul Mackerras	std	r4,0(r3)
83814cf11afSPaul Mackerras#endif /* CONFIG_SMP */
83914cf11afSPaul Mackerras	/* restore registers and return */
8406f3d8e69SMichael Neuling	blr
84114cf11afSPaul Mackerras#endif /* CONFIG_ALTIVEC */
84214cf11afSPaul Mackerras
843ce48b210SMichael Neuling	.align	7
844ce48b210SMichael Neuling	.globl vsx_unavailable_common
845ce48b210SMichael Neulingvsx_unavailable_common:
846ce48b210SMichael Neuling	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
847ce48b210SMichael Neuling#ifdef CONFIG_VSX
848ce48b210SMichael NeulingBEGIN_FTR_SECTION
849ce48b210SMichael Neuling	bne	.load_up_vsx
850ce48b210SMichael Neuling1:
851ce48b210SMichael NeulingEND_FTR_SECTION_IFSET(CPU_FTR_VSX)
852ce48b210SMichael Neuling#endif
853ce48b210SMichael Neuling	bl	.save_nvgprs
854ce48b210SMichael Neuling	addi	r3,r1,STACK_FRAME_OVERHEAD
855ce48b210SMichael Neuling	ENABLE_INTS
856ce48b210SMichael Neuling	bl	.vsx_unavailable_exception
857ce48b210SMichael Neuling	b	.ret_from_except
858ce48b210SMichael Neuling
859ce48b210SMichael Neuling#ifdef CONFIG_VSX
860ce48b210SMichael Neuling/*
861ce48b210SMichael Neuling * load_up_vsx(unused, unused, tsk)
862ce48b210SMichael Neuling * Disable VSX for the task which had it previously,
863ce48b210SMichael Neuling * and save its vector registers in its thread_struct.
864ce48b210SMichael Neuling * Reuse the fp and altivec load paths (load_up_fpu/load_up_altivec),
865ce48b210SMichael Neuling * but first check whether each has already been done.
866ce48b210SMichael Neuling * On entry: r13 == 'current' && last_task_used_vsx != 'current'
867ce48b210SMichael Neuling */
868ce48b210SMichael Neuling_STATIC(load_up_vsx)
869ce48b210SMichael Neuling/* Load FP and VSX registers if they haven't been done yet */
870ce48b210SMichael Neuling	andi.	r5,r12,MSR_FP
871ce48b210SMichael Neuling	beql+	load_up_fpu		/* skip if already loaded */
872ce48b210SMichael Neuling	andis.	r5,r12,MSR_VEC@h
873ce48b210SMichael Neuling	beql+	load_up_altivec		/* skip if already loaded */
874ce48b210SMichael Neuling
875ce48b210SMichael Neuling#ifndef CONFIG_SMP
876ce48b210SMichael Neuling	ld	r3,last_task_used_vsx@got(r2)
877ce48b210SMichael Neuling	ld	r4,0(r3)
878ce48b210SMichael Neuling	cmpdi	0,r4,0
879ce48b210SMichael Neuling	beq	1f
880ce48b210SMichael Neuling	/* Disable VSX for last_task_used_vsx */
881ce48b210SMichael Neuling	addi	r4,r4,THREAD
882ce48b210SMichael Neuling	ld	r5,PT_REGS(r4)
883ce48b210SMichael Neuling	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
884ce48b210SMichael Neuling	lis	r6,MSR_VSX@h
885ce48b210SMichael Neuling	andc	r6,r4,r6
886ce48b210SMichael Neuling	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
887ce48b210SMichael Neuling1:
888ce48b210SMichael Neuling#endif /* CONFIG_SMP */
889ce48b210SMichael Neuling	ld	r4,PACACURRENT(r13)
890ce48b210SMichael Neuling	addi	r4,r4,THREAD		/* Get THREAD */
891ce48b210SMichael Neuling	li	r6,1
892ce48b210SMichael Neuling	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
893ce48b210SMichael Neuling	/* enable use of VSX after return */
894ce48b210SMichael Neuling	oris	r12,r12,MSR_VSX@h
895ce48b210SMichael Neuling	std	r12,_MSR(r1)
896ce48b210SMichael Neuling#ifndef CONFIG_SMP
897ce48b210SMichael Neuling	/* Update last_task_used_vsx to 'current' */
898ce48b210SMichael Neuling	ld	r4,PACACURRENT(r13)
899ce48b210SMichael Neuling	std	r4,0(r3)
900ce48b210SMichael Neuling#endif /* CONFIG_SMP */
901ce48b210SMichael Neuling	b	fast_exception_return
902ce48b210SMichael Neuling#endif /* CONFIG_VSX */
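/*
 * VSX has no separate register state to load here: VSR0-31 overlap the
 * FP registers and VSR32-63 overlap the VMX registers, so load_up_vsx
 * only has to make sure load_up_fpu and load_up_altivec have both run
 * and then set MSR_VSX for the return to the task.
 */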
903ce48b210SMichael Neuling
90414cf11afSPaul Mackerras/*
90514cf11afSPaul Mackerras * Hash table stuff
90614cf11afSPaul Mackerras */
90714cf11afSPaul Mackerras	.align	7
908945feb17SBenjamin Herrenschmidt_STATIC(do_hash_page)
90914cf11afSPaul Mackerras	std	r3,_DAR(r1)
91014cf11afSPaul Mackerras	std	r4,_DSISR(r1)
91114cf11afSPaul Mackerras
91214cf11afSPaul Mackerras	andis.	r0,r4,0xa450		/* weird error? */
9133ccfc65cSPaul Mackerras	bne-	handle_page_fault	/* if not, try to insert a HPTE */
91414cf11afSPaul MackerrasBEGIN_FTR_SECTION
91514cf11afSPaul Mackerras	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
9163ccfc65cSPaul Mackerras	bne-	do_ste_alloc		/* If so handle it */
91714cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
91814cf11afSPaul Mackerras
91914cf11afSPaul Mackerras	/*
920945feb17SBenjamin Herrenschmidt	 * On iSeries, we soft-disable interrupts here, then
921945feb17SBenjamin Herrenschmidt	 * hard-enable interrupts so that the hash_page code can spin on
922945feb17SBenjamin Herrenschmidt	 * the hash_table_lock without problems on a shared processor.
923945feb17SBenjamin Herrenschmidt	 */
924945feb17SBenjamin Herrenschmidt	DISABLE_INTS
925945feb17SBenjamin Herrenschmidt
926945feb17SBenjamin Herrenschmidt	/*
927945feb17SBenjamin Herrenschmidt	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
928945feb17SBenjamin Herrenschmidt	 * and will clobber volatile registers when irq tracing is enabled,
929945feb17SBenjamin Herrenschmidt	 * so we need to reload them.  It may be possible to be smarter here
930945feb17SBenjamin Herrenschmidt	 * and move the irq tracing elsewhere, but let's keep it simple for
931945feb17SBenjamin Herrenschmidt	 * now.
932945feb17SBenjamin Herrenschmidt	 */
933945feb17SBenjamin Herrenschmidt#ifdef CONFIG_TRACE_IRQFLAGS
934945feb17SBenjamin Herrenschmidt	ld	r3,_DAR(r1)
935945feb17SBenjamin Herrenschmidt	ld	r4,_DSISR(r1)
936945feb17SBenjamin Herrenschmidt	ld	r5,_TRAP(r1)
937945feb17SBenjamin Herrenschmidt	ld	r12,_MSR(r1)
938945feb17SBenjamin Herrenschmidt	clrrdi	r5,r5,4
939945feb17SBenjamin Herrenschmidt#endif /* CONFIG_TRACE_IRQFLAGS */
940945feb17SBenjamin Herrenschmidt	/*
94114cf11afSPaul Mackerras	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
94214cf11afSPaul Mackerras	 * accessing a userspace segment (even from the kernel). We assume
94314cf11afSPaul Mackerras	 * kernel addresses always have the high bit set.
94414cf11afSPaul Mackerras	 */
94514cf11afSPaul Mackerras	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
94614cf11afSPaul Mackerras	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
94714cf11afSPaul Mackerras	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
94814cf11afSPaul Mackerras	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
94914cf11afSPaul Mackerras	ori	r4,r4,1			/* add _PAGE_PRESENT */
95014cf11afSPaul Mackerras	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
95114cf11afSPaul Mackerras
95214cf11afSPaul Mackerras	/*
95314cf11afSPaul Mackerras	 * r3 contains the faulting address
95414cf11afSPaul Mackerras	 * r4 contains the required access permissions
95514cf11afSPaul Mackerras	 * r5 contains the trap number
95614cf11afSPaul Mackerras	 *
95714cf11afSPaul Mackerras	 * at return r3 = 0 for success
95814cf11afSPaul Mackerras	 */
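	/*
	 * (A positive return from hash_page means "fall back to the
	 * generic do_page_fault path"; a negative one means the
	 * hypervisor refused the insertion and is handled at 12: below.)
	 */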
95914cf11afSPaul Mackerras	bl	.hash_page		/* build HPTE if possible */
96014cf11afSPaul Mackerras	cmpdi	r3,0			/* see if hash_page succeeded */
96114cf11afSPaul Mackerras
9623f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
96314cf11afSPaul Mackerras	/*
96414cf11afSPaul Mackerras	 * If we had interrupts soft-enabled at the point where the
96514cf11afSPaul Mackerras	 * DSI/ISI occurred, and an interrupt came in during hash_page,
96614cf11afSPaul Mackerras	 * handle it now.
96714cf11afSPaul Mackerras	 * We jump to ret_from_except_lite rather than fast_exception_return
96814cf11afSPaul Mackerras	 * because ret_from_except_lite will check for and handle pending
96914cf11afSPaul Mackerras	 * interrupts if necessary.
97014cf11afSPaul Mackerras	 */
9713ccfc65cSPaul Mackerras	beq	13f
972b0a779deSPaul MackerrasEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
973945feb17SBenjamin Herrenschmidt
974b0a779deSPaul MackerrasBEGIN_FW_FTR_SECTION
975b0a779deSPaul Mackerras	/*
976b0a779deSPaul Mackerras	 * Here we have interrupts hard-disabled, so it is sufficient
977b0a779deSPaul Mackerras	 * to restore paca->{soft,hard}_enable and get out.
978b0a779deSPaul Mackerras	 */
979b0a779deSPaul Mackerras	beq	fast_exc_return_irq	/* Return from exception on success */
980b0a779deSPaul MackerrasEND_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
981b0a779deSPaul Mackerras
98214cf11afSPaul Mackerras	/* For a hash failure, we don't bother re-enabling interrupts */
98314cf11afSPaul Mackerras	ble-	12f
98414cf11afSPaul Mackerras
98514cf11afSPaul Mackerras	/*
98614cf11afSPaul Mackerras	 * hash_page couldn't handle it, so set the soft interrupt enable back
987945feb17SBenjamin Herrenschmidt	 * to what it was before the trap.  Note that .raw_local_irq_restore
98814cf11afSPaul Mackerras	 * handles any interrupts pending at this point.
98914cf11afSPaul Mackerras	 */
99014cf11afSPaul Mackerras	ld	r3,SOFTE(r1)
991945feb17SBenjamin Herrenschmidt	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
992945feb17SBenjamin Herrenschmidt	bl	.raw_local_irq_restore
99314cf11afSPaul Mackerras	b	11f
99414cf11afSPaul Mackerras
99514cf11afSPaul Mackerras/* Here we have a page fault that hash_page can't handle. */
9963ccfc65cSPaul Mackerrashandle_page_fault:
99714cf11afSPaul Mackerras	ENABLE_INTS
99814cf11afSPaul Mackerras11:	ld	r4,_DAR(r1)
99914cf11afSPaul Mackerras	ld	r5,_DSISR(r1)
100014cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
100114cf11afSPaul Mackerras	bl	.do_page_fault
100214cf11afSPaul Mackerras	cmpdi	r3,0
10033ccfc65cSPaul Mackerras	beq+	13f
100414cf11afSPaul Mackerras	bl	.save_nvgprs
100514cf11afSPaul Mackerras	mr	r5,r3
100614cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
100714cf11afSPaul Mackerras	lwz	r4,_DAR(r1)
100814cf11afSPaul Mackerras	bl	.bad_page_fault
100914cf11afSPaul Mackerras	b	.ret_from_except
101014cf11afSPaul Mackerras
101179acbb3fSPaul Mackerras13:	b	.ret_from_except_lite
101279acbb3fSPaul Mackerras
101314cf11afSPaul Mackerras/* We have a page fault that hash_page could handle but the HV refused
101414cf11afSPaul Mackerras * the PTE insertion
101514cf11afSPaul Mackerras */
101614cf11afSPaul Mackerras12:	bl	.save_nvgprs
1017fa28237cSPaul Mackerras	mr	r5,r3
101814cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
1019a792e75dSBenjamin Herrenschmidt	ld	r4,_DAR(r1)
102014cf11afSPaul Mackerras	bl	.low_hash_fault
102114cf11afSPaul Mackerras	b	.ret_from_except
102214cf11afSPaul Mackerras
102314cf11afSPaul Mackerras	/* here we have a segment miss */
10243ccfc65cSPaul Mackerrasdo_ste_alloc:
102514cf11afSPaul Mackerras	bl	.ste_allocate		/* try to insert stab entry */
102614cf11afSPaul Mackerras	cmpdi	r3,0
10273ccfc65cSPaul Mackerras	bne-	handle_page_fault
10283ccfc65cSPaul Mackerras	b	fast_exception_return
102914cf11afSPaul Mackerras
103014cf11afSPaul Mackerras/*
103114cf11afSPaul Mackerras * r13 points to the PACA, r9 contains the saved CR,
103214cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
103314cf11afSPaul Mackerras * r9 - r13 are saved in paca->exslb.
103414cf11afSPaul Mackerras * We assume we aren't going to take any exceptions during this procedure.
103514cf11afSPaul Mackerras * We assume (DAR >> 60) == 0xc.
103614cf11afSPaul Mackerras */
103714cf11afSPaul Mackerras	.align	7
103814cf11afSPaul Mackerras_GLOBAL(do_stab_bolted)
103914cf11afSPaul Mackerras	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
104014cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
104114cf11afSPaul Mackerras
104214cf11afSPaul Mackerras	/* Hash to the primary group */
104314cf11afSPaul Mackerras	ld	r10,PACASTABVIRT(r13)
1044b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR
104514cf11afSPaul Mackerras	srdi	r11,r11,28
104614cf11afSPaul Mackerras	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
104714cf11afSPaul Mackerras
104814cf11afSPaul Mackerras	/* Calculate VSID */
104914cf11afSPaul Mackerras	/* This is a kernel address, so protovsid = ESID */
10501189be65SPaul Mackerras	ASM_VSID_SCRAMBLE(r11, r9, 256M)
105114cf11afSPaul Mackerras	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
105214cf11afSPaul Mackerras
105314cf11afSPaul Mackerras	/* Search the primary group for a free entry */
105414cf11afSPaul Mackerras1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
105514cf11afSPaul Mackerras	andi.	r11,r11,0x80
105614cf11afSPaul Mackerras	beq	2f
105714cf11afSPaul Mackerras	addi	r10,r10,16
105814cf11afSPaul Mackerras	andi.	r11,r10,0x70
105914cf11afSPaul Mackerras	bne	1b
106014cf11afSPaul Mackerras
106114cf11afSPaul Mackerras	/* Stick to only searching the primary group for now.		*/
106214cf11afSPaul Mackerras	/* At least for now, we use a very simple random castout scheme */
106314cf11afSPaul Mackerras	/* Use the TB as a random number; OR in 1 to avoid entry 0	*/
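	/* (TB << 4) & 0x70 is a 16-byte-aligned offset; OR-ing in 0x10
	 * keeps it in the range 0x10-0x70, i.e. entries 1-7 of the group. */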
106414cf11afSPaul Mackerras	mftb	r11
106514cf11afSPaul Mackerras	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
106614cf11afSPaul Mackerras	ori	r11,r11,0x10
106714cf11afSPaul Mackerras
106814cf11afSPaul Mackerras	/* r10 currently points to an ste one past the group of interest */
106914cf11afSPaul Mackerras	/* make it point to the randomly selected entry			*/
107014cf11afSPaul Mackerras	subi	r10,r10,128
107114cf11afSPaul Mackerras	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
107214cf11afSPaul Mackerras
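	/* Invalidate the victim STE: rotating right by 8 moves the valid
	 * bit to the MSB where rldicl's mask clears it, then rotate back.
	 * The slbie below flushes the stale SLB entry cached for that ESID. */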
107314cf11afSPaul Mackerras	isync			/* mark the entry invalid		*/
107414cf11afSPaul Mackerras	ld	r11,0(r10)
107514cf11afSPaul Mackerras	rldicl	r11,r11,56,1	/* clear the valid bit */
107614cf11afSPaul Mackerras	rotldi	r11,r11,8
107714cf11afSPaul Mackerras	std	r11,0(r10)
107814cf11afSPaul Mackerras	sync
107914cf11afSPaul Mackerras
108014cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
108114cf11afSPaul Mackerras	slbie	r11
108214cf11afSPaul Mackerras
108314cf11afSPaul Mackerras2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
108414cf11afSPaul Mackerras	eieio
108514cf11afSPaul Mackerras
1086b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
108714cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
108814cf11afSPaul Mackerras	ori	r11,r11,0x90	/* Turn on valid and kp			*/
108914cf11afSPaul Mackerras	std	r11,0(r10)	/* Put new entry back into the stab	*/
109014cf11afSPaul Mackerras
109114cf11afSPaul Mackerras	sync
109214cf11afSPaul Mackerras
109314cf11afSPaul Mackerras	/* All done -- return from exception. */
109414cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
109514cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
109614cf11afSPaul Mackerras
109714cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI
109814cf11afSPaul Mackerras	beq-	unrecov_slb
109914cf11afSPaul Mackerras
110014cf11afSPaul Mackerras	mtcrf	0x80,r9			/* restore CR */
110114cf11afSPaul Mackerras
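	/* Clear RI: mtmsrd with L=1 updates only EE and RI, so from here
	 * until the rfid any exception is flagged as unrecoverable while
	 * SRR0/SRR1 hold our return state. */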
110214cf11afSPaul Mackerras	mfmsr	r10
110314cf11afSPaul Mackerras	clrrdi	r10,r10,2
110414cf11afSPaul Mackerras	mtmsrd	r10,1
110514cf11afSPaul Mackerras
1106b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r11
1107b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r12
110814cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
110914cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
111014cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
111114cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
111214cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
111314cf11afSPaul Mackerras	rfid
111414cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
111514cf11afSPaul Mackerras
111614cf11afSPaul Mackerras/*
111714cf11afSPaul Mackerras * Space for CPU0's segment table.
111814cf11afSPaul Mackerras *
111914cf11afSPaul Mackerras * On iSeries, the hypervisor must fill in at least one entry before
112016a15a30SStephen Rothwell * we get control (with relocate on).  The address is given to the hv
112116a15a30SStephen Rothwell * as a page number (see xLparMap below), so this must be at a
112214cf11afSPaul Mackerras * fixed address (the linker can't compute (u64)&initial_stab >>
112314cf11afSPaul Mackerras * PAGE_SHIFT).
112414cf11afSPaul Mackerras */
1125758438a7SMichael Ellerman	. = STAB0_OFFSET	/* 0x6000 */
112614cf11afSPaul Mackerras	.globl initial_stab
112714cf11afSPaul Mackerrasinitial_stab:
112814cf11afSPaul Mackerras	.space	4096
112914cf11afSPaul Mackerras
11309e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
113114cf11afSPaul Mackerras/*
113214cf11afSPaul Mackerras * Data area reserved for FWNMI option.
113314cf11afSPaul Mackerras * This address (0x7000) is fixed by the RPA.
113414cf11afSPaul Mackerras */
113514cf11afSPaul Mackerras	. = 0x7000
113614cf11afSPaul Mackerras	.globl fwnmi_data_area
113714cf11afSPaul Mackerrasfwnmi_data_area:
11389e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
113914cf11afSPaul Mackerras
114014cf11afSPaul Mackerras	/* iSeries does not use the FWNMI stuff, so it is safe to put
114114cf11afSPaul Mackerras	 * this here, even if we later allow kernels that will boot on
114214cf11afSPaul Mackerras	 * both pSeries and iSeries */
114314cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
114414cf11afSPaul Mackerras        . = LPARMAP_PHYS
114516a15a30SStephen Rothwell	.globl xLparMap
114616a15a30SStephen RothwellxLparMap:
114716a15a30SStephen Rothwell	.quad	HvEsidsToMap		/* xNumberEsids */
114816a15a30SStephen Rothwell	.quad	HvRangesToMap		/* xNumberRanges */
114916a15a30SStephen Rothwell	.quad	STAB0_PAGE		/* xSegmentTableOffs */
115016a15a30SStephen Rothwell	.zero	40			/* xRsvd */
115116a15a30SStephen Rothwell	/* xEsids (HvEsidsToMap entries of 2 quads) */
115216a15a30SStephen Rothwell	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
115316a15a30SStephen Rothwell	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
115416a15a30SStephen Rothwell	.quad	VMALLOC_START_ESID	/* xKernelEsid */
115516a15a30SStephen Rothwell	.quad	VMALLOC_START_VSID	/* xKernelVsid */
115616a15a30SStephen Rothwell	/* xRanges (HvRangesToMap entries of 3 quads) */
115716a15a30SStephen Rothwell	.quad	HvPagesToMap		/* xPages */
115816a15a30SStephen Rothwell	.quad	0			/* xOffset */
115916a15a30SStephen Rothwell	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
116016a15a30SStephen Rothwell
116114cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
116214cf11afSPaul Mackerras
11639e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
116414cf11afSPaul Mackerras        . = 0x8000
11659e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
116614cf11afSPaul Mackerras
116714cf11afSPaul Mackerras/*
1168f39b7a55SOlof Johansson * On pSeries and most other platforms, secondary processors spin
1169f39b7a55SOlof Johansson * in the following code.
117014cf11afSPaul Mackerras * At entry, r3 = this processor's number (physical cpu id)
117114cf11afSPaul Mackerras */
1172f39b7a55SOlof Johansson_GLOBAL(generic_secondary_smp_init)
117314cf11afSPaul Mackerras	mr	r24,r3
117414cf11afSPaul Mackerras
117514cf11afSPaul Mackerras	/* turn on 64-bit mode */
117614cf11afSPaul Mackerras	bl	.enable_64b_mode
117714cf11afSPaul Mackerras
117814cf11afSPaul Mackerras	/* Set up a paca value for this processor. Since we have the
117914cf11afSPaul Mackerras	 * physical cpu id in r24, we need to search the pacas to find
118014cf11afSPaul Mackerras	 * which logical id maps to our physical one.
118114cf11afSPaul Mackerras	 */
1182e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r13, paca)	/* Get base vaddr of paca array	 */
118314cf11afSPaul Mackerras	li	r5,0			/* logical cpu id                */
118414cf11afSPaul Mackerras1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
118514cf11afSPaul Mackerras	cmpw	r6,r24			/* Compare to our id             */
118614cf11afSPaul Mackerras	beq	2f
118714cf11afSPaul Mackerras	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
118814cf11afSPaul Mackerras	addi	r5,r5,1
118914cf11afSPaul Mackerras	cmpwi	r5,NR_CPUS
119014cf11afSPaul Mackerras	blt	1b
119114cf11afSPaul Mackerras
119214cf11afSPaul Mackerras	mr	r3,r24			/* not found, copy phys to r3	 */
119314cf11afSPaul Mackerras	b	.kexec_wait		/* next kernel might do better	 */
119414cf11afSPaul Mackerras
1195b5bbeb23SPaul Mackerras2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
119614cf11afSPaul Mackerras	/* From now on, r24 is expected to be logical cpuid */
119714cf11afSPaul Mackerras	mr	r24,r5
119814cf11afSPaul Mackerras3:	HMT_LOW
119914cf11afSPaul Mackerras	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
120014cf11afSPaul Mackerras					/* start.			 */
120114cf11afSPaul Mackerras
1202f39b7a55SOlof Johansson#ifndef CONFIG_SMP
1203f39b7a55SOlof Johansson	b	3b			/* Never go on non-SMP		 */
1204f39b7a55SOlof Johansson#else
1205f39b7a55SOlof Johansson	cmpwi	0,r23,0
1206f39b7a55SOlof Johansson	beq	3b			/* Loop until told to go	 */
1207f39b7a55SOlof Johansson
1208b6f6b98aSSonny Rao	sync				/* order paca.run and cur_cpu_spec */
1209b6f6b98aSSonny Rao
1210f39b7a55SOlof Johansson	/* See if we need to call a cpu state restore handler */
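	/* cpu_restore is a ppc64 function descriptor; the final
	 * ld r23,0(r23) fetches the real entry address before the bctrl. */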
1211f39b7a55SOlof Johansson	LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
1212f39b7a55SOlof Johansson	ld	r23,0(r23)
1213f39b7a55SOlof Johansson	ld	r23,CPU_SPEC_RESTORE(r23)
1214f39b7a55SOlof Johansson	cmpdi	0,r23,0
1215f39b7a55SOlof Johansson	beq	4f
1216f39b7a55SOlof Johansson	ld	r23,0(r23)
1217f39b7a55SOlof Johansson	mtctr	r23
1218f39b7a55SOlof Johansson	bctrl
1219f39b7a55SOlof Johansson
1220f39b7a55SOlof Johansson4:	/* Create a temp kernel stack for use before relocation is on.	*/
122114cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
122214cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
122314cf11afSPaul Mackerras
1224c705677eSStephen Rothwell	b	__secondary_start
122514cf11afSPaul Mackerras#endif
122614cf11afSPaul Mackerras
122714cf11afSPaul Mackerras_STATIC(__mmu_off)
122814cf11afSPaul Mackerras	mfmsr	r3
122914cf11afSPaul Mackerras	andi.	r0,r3,MSR_IR|MSR_DR
123014cf11afSPaul Mackerras	beqlr
123114cf11afSPaul Mackerras	andc	r3,r3,r0
123214cf11afSPaul Mackerras	mtspr	SPRN_SRR0,r4
123314cf11afSPaul Mackerras	mtspr	SPRN_SRR1,r3
123414cf11afSPaul Mackerras	sync
123514cf11afSPaul Mackerras	rfid
123614cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
123714cf11afSPaul Mackerras
123814cf11afSPaul Mackerras
123914cf11afSPaul Mackerras/*
124014cf11afSPaul Mackerras * Here is our main kernel entry point. We currently support two kinds of entry
124114cf11afSPaul Mackerras * depending on the value of r5.
124214cf11afSPaul Mackerras *
124314cf11afSPaul Mackerras *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
124414cf11afSPaul Mackerras *                 in r3...r7
124514cf11afSPaul Mackerras *
124614cf11afSPaul Mackerras *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
124714cf11afSPaul Mackerras *                 DT block, r4 is a physical pointer to the kernel itself
124814cf11afSPaul Mackerras *
124914cf11afSPaul Mackerras */
125014cf11afSPaul Mackerras_GLOBAL(__start_initialization_multiplatform)
125114cf11afSPaul Mackerras	/*
125214cf11afSPaul Mackerras	 * Are we booted from a PROM OF-type client interface?
125314cf11afSPaul Mackerras	 */
125414cf11afSPaul Mackerras	cmpldi	cr0,r5,0
1255939e60f6SStephen Rothwell	beq	1f
1256939e60f6SStephen Rothwell	b	.__boot_from_prom		/* yes -> prom */
1257939e60f6SStephen Rothwell1:
125814cf11afSPaul Mackerras	/* Save parameters */
125914cf11afSPaul Mackerras	mr	r31,r3
126014cf11afSPaul Mackerras	mr	r30,r4
126114cf11afSPaul Mackerras
126214cf11afSPaul Mackerras	/* Make sure we are running in 64 bits mode */
126314cf11afSPaul Mackerras	bl	.enable_64b_mode
126414cf11afSPaul Mackerras
126514cf11afSPaul Mackerras	/* Setup some critical 970 SPRs before switching MMU off */
1266f39b7a55SOlof Johansson	mfspr	r0,SPRN_PVR
1267f39b7a55SOlof Johansson	srwi	r0,r0,16
1268f39b7a55SOlof Johansson	cmpwi	r0,0x39		/* 970 */
1269f39b7a55SOlof Johansson	beq	1f
1270f39b7a55SOlof Johansson	cmpwi	r0,0x3c		/* 970FX */
1271f39b7a55SOlof Johansson	beq	1f
1272f39b7a55SOlof Johansson	cmpwi	r0,0x44		/* 970MP */
1273190a24f5SOlof Johansson	beq	1f
1274190a24f5SOlof Johansson	cmpwi	r0,0x45		/* 970GX */
1275f39b7a55SOlof Johansson	bne	2f
1276f39b7a55SOlof Johansson1:	bl	.__cpu_preinit_ppc970
1277f39b7a55SOlof Johansson2:
127814cf11afSPaul Mackerras
127914cf11afSPaul Mackerras	/* Switch off MMU if not already */
1280e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
128114cf11afSPaul Mackerras	add	r4,r4,r30
128214cf11afSPaul Mackerras	bl	.__mmu_off
128314cf11afSPaul Mackerras	b	.__after_prom_start
128414cf11afSPaul Mackerras
1285939e60f6SStephen Rothwell_INIT_STATIC(__boot_from_prom)
128614cf11afSPaul Mackerras	/* Save parameters */
128714cf11afSPaul Mackerras	mr	r31,r3
128814cf11afSPaul Mackerras	mr	r30,r4
128914cf11afSPaul Mackerras	mr	r29,r5
129014cf11afSPaul Mackerras	mr	r28,r6
129114cf11afSPaul Mackerras	mr	r27,r7
129214cf11afSPaul Mackerras
12936088857bSOlaf Hering	/*
12946088857bSOlaf Hering	 * Align the stack to a 16-byte boundary.
12956088857bSOlaf Hering	 * Depending on the size and layout of the ELF sections in the initial
12966088857bSOlaf Hering	 * boot binary, the stack pointer may be unaligned on PowerMac.
12976088857bSOlaf Hering	 */
1298c05b4770SLinus Torvalds	rldicr	r1,r1,0,59	/* r1 &= ~0xf */
1299c05b4770SLinus Torvalds
130014cf11afSPaul Mackerras	/* Make sure we are running in 64 bits mode */
130114cf11afSPaul Mackerras	bl	.enable_64b_mode
130214cf11afSPaul Mackerras
130314cf11afSPaul Mackerras	/* put a relocation offset into r3 */
130414cf11afSPaul Mackerras	bl	.reloc_offset
130514cf11afSPaul Mackerras
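	/* The TOC pointer points 0x8000 past __toc_start (the usual ABI
	 * bias) so 16-bit signed offsets span the 64K TOC; +0x8000 won't
	 * fit in one addi immediate, hence the two 0x4000 steps. */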
1306e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r2,__toc_start)
130714cf11afSPaul Mackerras	addi	r2,r2,0x4000
130814cf11afSPaul Mackerras	addi	r2,r2,0x4000
130914cf11afSPaul Mackerras
131014cf11afSPaul Mackerras	/* Relocate the TOC from a virt addr to a real addr */
13115a408329SPaul Mackerras	add	r2,r2,r3
131214cf11afSPaul Mackerras
131314cf11afSPaul Mackerras	/* Restore parameters */
131414cf11afSPaul Mackerras	mr	r3,r31
131514cf11afSPaul Mackerras	mr	r4,r30
131614cf11afSPaul Mackerras	mr	r5,r29
131714cf11afSPaul Mackerras	mr	r6,r28
131814cf11afSPaul Mackerras	mr	r7,r27
131914cf11afSPaul Mackerras
132014cf11afSPaul Mackerras	/* Do all of the interaction with OF client interface */
132114cf11afSPaul Mackerras	bl	.prom_init
132214cf11afSPaul Mackerras	/* We never return */
132314cf11afSPaul Mackerras	trap
132414cf11afSPaul Mackerras
132514cf11afSPaul Mackerras_STATIC(__after_prom_start)
132614cf11afSPaul Mackerras
132714cf11afSPaul Mackerras/*
1328758438a7SMichael Ellerman * We need to run with __start at physical address PHYSICAL_START.
132914cf11afSPaul Mackerras * This will leave some code in the first 256B of
133014cf11afSPaul Mackerras * real memory, which are reserved for software use.
133114cf11afSPaul Mackerras * The remainder of the first page is loaded with the fixed
133214cf11afSPaul Mackerras * interrupt vectors.  The next two pages are filled with
133314cf11afSPaul Mackerras * unknown exception placeholders.
133414cf11afSPaul Mackerras *
133514cf11afSPaul Mackerras * Note: This process overwrites the OF exception vectors.
133614cf11afSPaul Mackerras *	r26 == relocation offset
133714cf11afSPaul Mackerras *	r27 == KERNELBASE
133814cf11afSPaul Mackerras */
133914cf11afSPaul Mackerras	bl	.reloc_offset
134014cf11afSPaul Mackerras	mr	r26,r3
1341e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r27, KERNELBASE)
134214cf11afSPaul Mackerras
1343e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)	/* target addr */
134414cf11afSPaul Mackerras
134514cf11afSPaul Mackerras	// XXX FIXME: Use phys returned by OF (r30)
13465a408329SPaul Mackerras	add	r4,r27,r26 		/* source addr			 */
134714cf11afSPaul Mackerras					/* current address of _start	 */
134814cf11afSPaul Mackerras					/*   i.e. where we are running	 */
134914cf11afSPaul Mackerras					/*	the source addr		 */
135014cf11afSPaul Mackerras
1351d0b79c54SJimi Xenidis	cmpdi	r4,0			/* In some cases the loader may  */
1352939e60f6SStephen Rothwell	bne	1f
1353939e60f6SStephen Rothwell	b	.start_here_multiplatform /* have already put us at zero */
1354d0b79c54SJimi Xenidis					/* so we can skip the copy.      */
1355939e60f6SStephen Rothwell1:	LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
135614cf11afSPaul Mackerras	sub	r5,r5,r27
135714cf11afSPaul Mackerras
135814cf11afSPaul Mackerras	li	r6,0x100		/* Start offset, the first 0x100 */
135914cf11afSPaul Mackerras					/* bytes were copied earlier.	 */
136014cf11afSPaul Mackerras
136114cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the first n bytes	 */
136214cf11afSPaul Mackerras					/* this includes the code being	 */
136314cf11afSPaul Mackerras					/* executed here.		 */
136414cf11afSPaul Mackerras
1365e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r0, 4f)	/* Jump to the copy of this code */
136614cf11afSPaul Mackerras	mtctr	r0			/* that we just made/relocated	 */
136714cf11afSPaul Mackerras	bctr
136814cf11afSPaul Mackerras
1369e58c3495SDavid Gibson4:	LOAD_REG_IMMEDIATE(r5,klimit)
13705a408329SPaul Mackerras	add	r5,r5,r26
137114cf11afSPaul Mackerras	ld	r5,0(r5)		/* get the value of klimit */
137214cf11afSPaul Mackerras	sub	r5,r5,r27
137314cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the rest */
137414cf11afSPaul Mackerras	b	.start_here_multiplatform
137514cf11afSPaul Mackerras
137614cf11afSPaul Mackerras/*
137714cf11afSPaul Mackerras * Copy routine used to copy the kernel to start at physical address 0
137814cf11afSPaul Mackerras * and flush and invalidate the caches as needed.
137914cf11afSPaul Mackerras * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
138014cf11afSPaul Mackerras * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
138114cf11afSPaul Mackerras *
138214cf11afSPaul Mackerras * Note: this routine *only* clobbers r0, r6 and lr
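 * Each iteration copies eight doublewords (64 bytes), then uses dcbst to
 * push the block out to memory and icbi to invalidate it from the i-cache.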
138314cf11afSPaul Mackerras */
138414cf11afSPaul Mackerras_GLOBAL(copy_and_flush)
138514cf11afSPaul Mackerras	addi	r5,r5,-8
138614cf11afSPaul Mackerras	addi	r6,r6,-8
13875a2fe38dSOlof Johansson4:	li	r0,8			/* Use the smallest common	*/
138814cf11afSPaul Mackerras					/* denominator cache line	*/
138914cf11afSPaul Mackerras					/* size.  This results in	*/
139014cf11afSPaul Mackerras					/* extra cache line flushes	*/
139114cf11afSPaul Mackerras					/* but operation is correct.	*/
139214cf11afSPaul Mackerras					/* Can't get cache line size	*/
139314cf11afSPaul Mackerras					/* from NACA as it is being	*/
139414cf11afSPaul Mackerras					/* moved too.			*/
139514cf11afSPaul Mackerras
139614cf11afSPaul Mackerras	mtctr	r0			/* put # words/line in ctr	*/
139714cf11afSPaul Mackerras3:	addi	r6,r6,8			/* copy a cache line		*/
139814cf11afSPaul Mackerras	ldx	r0,r6,r4
139914cf11afSPaul Mackerras	stdx	r0,r6,r3
140014cf11afSPaul Mackerras	bdnz	3b
140114cf11afSPaul Mackerras	dcbst	r6,r3			/* write it to memory		*/
140214cf11afSPaul Mackerras	sync
140314cf11afSPaul Mackerras	icbi	r6,r3			/* flush the icache line	*/
140414cf11afSPaul Mackerras	cmpld	0,r6,r5
140514cf11afSPaul Mackerras	blt	4b
140614cf11afSPaul Mackerras	sync
140714cf11afSPaul Mackerras	addi	r5,r5,8
140814cf11afSPaul Mackerras	addi	r6,r6,8
140914cf11afSPaul Mackerras	blr
141014cf11afSPaul Mackerras
141114cf11afSPaul Mackerras.align 8
141214cf11afSPaul Mackerrascopy_to_here:
141314cf11afSPaul Mackerras
141414cf11afSPaul Mackerras#ifdef CONFIG_SMP
141514cf11afSPaul Mackerras#ifdef CONFIG_PPC_PMAC
141614cf11afSPaul Mackerras/*
141714cf11afSPaul Mackerras * On PowerMac, secondary processors start from the reset vector, which
141814cf11afSPaul Mackerras * is temporarily turned into a call to one of the functions below.
141914cf11afSPaul Mackerras */
142014cf11afSPaul Mackerras	.section ".text";
142114cf11afSPaul Mackerras	.align 2 ;
142214cf11afSPaul Mackerras
142335499c01SPaul Mackerras	.globl	__secondary_start_pmac_0
142435499c01SPaul Mackerras__secondary_start_pmac_0:
142535499c01SPaul Mackerras	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
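	/* Each li/b pair is exactly 8 bytes, so CPU n enters at
	 * __secondary_start_pmac_0 + 8*n. */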
142635499c01SPaul Mackerras	li	r24,0
142735499c01SPaul Mackerras	b	1f
142814cf11afSPaul Mackerras	li	r24,1
142935499c01SPaul Mackerras	b	1f
143014cf11afSPaul Mackerras	li	r24,2
143135499c01SPaul Mackerras	b	1f
143214cf11afSPaul Mackerras	li	r24,3
143335499c01SPaul Mackerras1:
143414cf11afSPaul Mackerras
143514cf11afSPaul Mackerras_GLOBAL(pmac_secondary_start)
143614cf11afSPaul Mackerras	/* turn on 64-bit mode */
143714cf11afSPaul Mackerras	bl	.enable_64b_mode
143814cf11afSPaul Mackerras
143914cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
1440f39b7a55SOlof Johansson	bl	.__restore_cpu_ppc970
144114cf11afSPaul Mackerras
144214cf11afSPaul Mackerras	/* pSeries does this early, though I don't think we really need it */
144314cf11afSPaul Mackerras	mfmsr	r3
144414cf11afSPaul Mackerras	ori	r3,r3,MSR_RI
144514cf11afSPaul Mackerras	mtmsrd	r3			/* RI on */
144614cf11afSPaul Mackerras
144714cf11afSPaul Mackerras	/* Set up a paca value for this processor. */
1448e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
144914cf11afSPaul Mackerras	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
145014cf11afSPaul Mackerras	add	r13,r13,r4		/* for this processor.		*/
1451b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
145214cf11afSPaul Mackerras
145314cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
145414cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
145514cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
145614cf11afSPaul Mackerras
1457c705677eSStephen Rothwell	b	__secondary_start
145814cf11afSPaul Mackerras
145914cf11afSPaul Mackerras#endif /* CONFIG_PPC_PMAC */
146014cf11afSPaul Mackerras
146114cf11afSPaul Mackerras/*
146214cf11afSPaul Mackerras * This function is called after the master CPU has released the
146314cf11afSPaul Mackerras * secondary processors.  The execution environment is relocation off.
146414cf11afSPaul Mackerras * The paca for this processor has the following fields initialized at
146514cf11afSPaul Mackerras * this point:
146614cf11afSPaul Mackerras *   1. Processor number
146714cf11afSPaul Mackerras *   2. Segment table pointer (virtual address)
146814cf11afSPaul Mackerras * On entry the following are set:
146914cf11afSPaul Mackerras *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
147014cf11afSPaul Mackerras *   r24   = cpu# (in Linux terms)
147114cf11afSPaul Mackerras *   r13   = paca virtual address
147214cf11afSPaul Mackerras *   SPRG3 = paca virtual address
147314cf11afSPaul Mackerras */
1474fc68e869SStephen Rothwell	.globl	__secondary_start
1475c705677eSStephen Rothwell__secondary_start:
1476799d6046SPaul Mackerras	/* Set thread priority to MEDIUM */
1477799d6046SPaul Mackerras	HMT_MEDIUM
147814cf11afSPaul Mackerras
1479799d6046SPaul Mackerras	/* Load TOC */
148014cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
148114cf11afSPaul Mackerras
1482799d6046SPaul Mackerras	/* Do early setup for that CPU (stab, slb, hash table pointer) */
1483799d6046SPaul Mackerras	bl	.early_setup_secondary
148414cf11afSPaul Mackerras
148514cf11afSPaul Mackerras	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1486e58c3495SDavid Gibson	LOAD_REG_ADDR(r3, current_set)
148714cf11afSPaul Mackerras	sldi	r28,r24,3		/* get current_set[cpu#]	 */
148814cf11afSPaul Mackerras	ldx	r1,r3,r28
148914cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
149014cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
149114cf11afSPaul Mackerras
1492799d6046SPaul Mackerras	/* Clear backchain so we get nice backtraces */
149314cf11afSPaul Mackerras	li	r7,0
149414cf11afSPaul Mackerras	mtlr	r7
149514cf11afSPaul Mackerras
149614cf11afSPaul Mackerras	/* enable MMU and jump to start_secondary */
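	/* rfid with SRR0 = entry point and SRR1 = MSR_KERNEL turns on
	 * relocation and branches in a single operation. */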
1497e58c3495SDavid Gibson	LOAD_REG_ADDR(r3, .start_secondary_prolog)
1498e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1499d04c56f7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
15003f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
150114cf11afSPaul Mackerras	ori	r4,r4,MSR_EE
1502ff3da2e0SBenjamin Herrenschmidt	li	r8,1
1503ff3da2e0SBenjamin Herrenschmidt	stb	r8,PACAHARDIRQEN(r13)
15043f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
150514cf11afSPaul Mackerras#endif
1506d04c56f7SPaul MackerrasBEGIN_FW_FTR_SECTION
1507d04c56f7SPaul Mackerras	stb	r7,PACAHARDIRQEN(r13)
1508d04c56f7SPaul MackerrasEND_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1509ff3da2e0SBenjamin Herrenschmidt	stb	r7,PACASOFTIRQEN(r13)
1510d04c56f7SPaul Mackerras
1511b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r3
1512b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r4
151314cf11afSPaul Mackerras	rfid
151414cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
151514cf11afSPaul Mackerras
151614cf11afSPaul Mackerras/*
151714cf11afSPaul Mackerras * Running with relocation on at this point.  All we want to do is
151814cf11afSPaul Mackerras * zero the stack back-chain pointer before going into C code.
151914cf11afSPaul Mackerras */
152014cf11afSPaul Mackerras_GLOBAL(start_secondary_prolog)
152114cf11afSPaul Mackerras	li	r3,0
152214cf11afSPaul Mackerras	std	r3,0(r1)		/* Zero the stack frame pointer	*/
152314cf11afSPaul Mackerras	bl	.start_secondary
1524799d6046SPaul Mackerras	b	.
152514cf11afSPaul Mackerras#endif
152614cf11afSPaul Mackerras
152714cf11afSPaul Mackerras/*
152814cf11afSPaul Mackerras * This subroutine clobbers r11 and r12
152914cf11afSPaul Mackerras */
153014cf11afSPaul Mackerras_GLOBAL(enable_64b_mode)
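	/* MSR_SF (64-bit mode) and MSR_ISF (interrupt 64-bit mode) live in
	 * the high word of the MSR, beyond a 16-bit immediate, so each bit
	 * is built with li + rldicr and ORed in. */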
153114cf11afSPaul Mackerras	mfmsr	r11			/* grab the current MSR */
153214cf11afSPaul Mackerras	li	r12,1
153314cf11afSPaul Mackerras	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
153414cf11afSPaul Mackerras	or	r11,r11,r12
153514cf11afSPaul Mackerras	li	r12,1
153614cf11afSPaul Mackerras	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
153714cf11afSPaul Mackerras	or	r11,r11,r12
153814cf11afSPaul Mackerras	mtmsrd	r11
153914cf11afSPaul Mackerras	isync
154014cf11afSPaul Mackerras	blr
154114cf11afSPaul Mackerras
154214cf11afSPaul Mackerras/*
154314cf11afSPaul Mackerras * This is where the main kernel code starts.
154414cf11afSPaul Mackerras */
1545939e60f6SStephen Rothwell_INIT_STATIC(start_here_multiplatform)
154614cf11afSPaul Mackerras	/* get a new offset, now that the kernel has moved. */
154714cf11afSPaul Mackerras	bl	.reloc_offset
154814cf11afSPaul Mackerras	mr	r26,r3
154914cf11afSPaul Mackerras
155014cf11afSPaul Mackerras	/* Clear out the BSS. It may have been done in prom_init already,
155114cf11afSPaul Mackerras	 * but that's irrelevant since prom_init will soon
155214cf11afSPaul Mackerras	 * be detached from the kernel completely. Besides, we need
155314cf11afSPaul Mackerras	 * to clear it now for kexec-style entry.
155414cf11afSPaul Mackerras	 */
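	/* Size the BSS in doublewords (rounded up); r8 starts 8 bytes below
	 * __bss_start because stdu stores with pre-increment. */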
1555e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r11,__bss_stop)
1556e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r8,__bss_start)
155714cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
155814cf11afSPaul Mackerras	addi	r11,r11,7		/* round up to an even double word */
155914cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
156014cf11afSPaul Mackerras	beq	4f
156114cf11afSPaul Mackerras	addi	r8,r8,-8
156214cf11afSPaul Mackerras	li	r0,0
156314cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
156414cf11afSPaul Mackerras3:	stdu	r0,8(r8)
156514cf11afSPaul Mackerras	bdnz	3b
156614cf11afSPaul Mackerras4:
156714cf11afSPaul Mackerras
156814cf11afSPaul Mackerras	mfmsr	r6
156914cf11afSPaul Mackerras	ori	r6,r6,MSR_RI
157014cf11afSPaul Mackerras	mtmsrd	r6			/* RI on */
157114cf11afSPaul Mackerras
157214cf11afSPaul Mackerras	/* The following gets the stack and TOC set up with the regs */
157314cf11afSPaul Mackerras	/* pointing to the real addr of the kernel stack.  This is   */
157414cf11afSPaul Mackerras	/* all done to support the C function call below which sets  */
157514cf11afSPaul Mackerras	/* up the htab.  This is done because we have relocated the  */
157614cf11afSPaul Mackerras	/* kernel but are still running in real mode. */
157714cf11afSPaul Mackerras
1578e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3,init_thread_union)
15795a408329SPaul Mackerras	add	r3,r3,r26
158014cf11afSPaul Mackerras
158114cf11afSPaul Mackerras	/* set up a stack pointer (physical address) */
158214cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
158314cf11afSPaul Mackerras	li	r0,0
158414cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
158514cf11afSPaul Mackerras
158614cf11afSPaul Mackerras	/* set up the TOC (physical address) */
1587e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r2,__toc_start)
158814cf11afSPaul Mackerras	addi	r2,r2,0x4000
158914cf11afSPaul Mackerras	addi	r2,r2,0x4000
15905a408329SPaul Mackerras	add	r2,r2,r26
159114cf11afSPaul Mackerras
159214cf11afSPaul Mackerras	/* Do very early kernel initializations, including initial hash table,
159314cf11afSPaul Mackerras	 * stab and slb setup before we turn on relocation.	*/
159414cf11afSPaul Mackerras
159514cf11afSPaul Mackerras	/* Restore parameters passed from prom_init/kexec */
159614cf11afSPaul Mackerras	mr	r3,r31
159714cf11afSPaul Mackerras 	bl	.early_setup
159814cf11afSPaul Mackerras
1599e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3, .start_here_common)
1600e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1601b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r3
1602b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r4
160314cf11afSPaul Mackerras	rfid
160414cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
160514cf11afSPaul Mackerras
160614cf11afSPaul Mackerras	/* This is where all platforms converge execution */
1607fc68e869SStephen Rothwell_INIT_GLOBAL(start_here_common)
160814cf11afSPaul Mackerras	/* relocation is on at this point */
160914cf11afSPaul Mackerras
161014cf11afSPaul Mackerras	/* The following code sets up the SP and TOC now that we are */
161114cf11afSPaul Mackerras	/* running with translation enabled. */
161214cf11afSPaul Mackerras
1613e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3,init_thread_union)
161414cf11afSPaul Mackerras
161514cf11afSPaul Mackerras	/* set up the stack */
161614cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
161714cf11afSPaul Mackerras	li	r0,0
161814cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
161914cf11afSPaul Mackerras
162014cf11afSPaul Mackerras	/* Load the TOC */
162114cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
162214cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
162314cf11afSPaul Mackerras
162414cf11afSPaul Mackerras	bl	.setup_system
162514cf11afSPaul Mackerras
162614cf11afSPaul Mackerras	/* Load up the kernel context */
162714cf11afSPaul Mackerras5:
162814cf11afSPaul Mackerras	li	r5,0
1629d04c56f7SPaul Mackerras	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
1630d04c56f7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1631d04c56f7SPaul MackerrasBEGIN_FW_FTR_SECTION
163214cf11afSPaul Mackerras	mfmsr	r5
1633ff3da2e0SBenjamin Herrenschmidt	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries*/
163414cf11afSPaul Mackerras	mtmsrd	r5
1635ff3da2e0SBenjamin Herrenschmidt	li	r5,1
16363f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
163714cf11afSPaul Mackerras#endif
1638ff3da2e0SBenjamin Herrenschmidt	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */
163914cf11afSPaul Mackerras
164014cf11afSPaul Mackerras	bl	.start_kernel
164114cf11afSPaul Mackerras
1642f1870f77SAnton Blanchard	/* Not reached */
1643f1870f77SAnton Blanchard	BUG_OPCODE
164414cf11afSPaul Mackerras
164514cf11afSPaul Mackerras/*
164614cf11afSPaul Mackerras * We put a few things here that have to be page-aligned.
164714cf11afSPaul Mackerras * This stuff goes at the beginning of the bss, which is page-aligned.
164814cf11afSPaul Mackerras */
164914cf11afSPaul Mackerras	.section ".bss"
165014cf11afSPaul Mackerras
165114cf11afSPaul Mackerras	.align	PAGE_SHIFT
165214cf11afSPaul Mackerras
165314cf11afSPaul Mackerras	.globl	empty_zero_page
165414cf11afSPaul Mackerrasempty_zero_page:
165514cf11afSPaul Mackerras	.space	PAGE_SIZE
165614cf11afSPaul Mackerras
165714cf11afSPaul Mackerras	.globl	swapper_pg_dir
165814cf11afSPaul Mackerrasswapper_pg_dir:
1659ee7a76daSStephen Rothwell	.space	PGD_TABLE_SIZE
1660