xref: /openbmc/linux/arch/powerpc/kernel/head_64.S (revision 9a955167)
114cf11afSPaul Mackerras/*
214cf11afSPaul Mackerras *  PowerPC version
314cf11afSPaul Mackerras *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
414cf11afSPaul Mackerras *
514cf11afSPaul Mackerras *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
614cf11afSPaul Mackerras *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
714cf11afSPaul Mackerras *  Adapted for Power Macintosh by Paul Mackerras.
814cf11afSPaul Mackerras *  Low-level exception handlers and MMU support
914cf11afSPaul Mackerras *  rewritten by Paul Mackerras.
1014cf11afSPaul Mackerras *    Copyright (C) 1996 Paul Mackerras.
1114cf11afSPaul Mackerras *
1214cf11afSPaul Mackerras *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
1314cf11afSPaul Mackerras *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
1414cf11afSPaul Mackerras *
1514cf11afSPaul Mackerras *  This file contains the low-level support and setup for the
1614cf11afSPaul Mackerras *  PowerPC-64 platform, including trap and interrupt dispatch.
1714cf11afSPaul Mackerras *
1814cf11afSPaul Mackerras *  This program is free software; you can redistribute it and/or
1914cf11afSPaul Mackerras *  modify it under the terms of the GNU General Public License
2014cf11afSPaul Mackerras *  as published by the Free Software Foundation; either version
2114cf11afSPaul Mackerras *  2 of the License, or (at your option) any later version.
2214cf11afSPaul Mackerras */
2314cf11afSPaul Mackerras
2414cf11afSPaul Mackerras#include <linux/threads.h>
25b5bbeb23SPaul Mackerras#include <asm/reg.h>
2614cf11afSPaul Mackerras#include <asm/page.h>
2714cf11afSPaul Mackerras#include <asm/mmu.h>
2814cf11afSPaul Mackerras#include <asm/ppc_asm.h>
2914cf11afSPaul Mackerras#include <asm/asm-offsets.h>
3014cf11afSPaul Mackerras#include <asm/bug.h>
3114cf11afSPaul Mackerras#include <asm/cputable.h>
3214cf11afSPaul Mackerras#include <asm/setup.h>
3314cf11afSPaul Mackerras#include <asm/hvcall.h>
34c43a55ffSKelly Daly#include <asm/iseries/lpar_map.h>
356cb7bfebSDavid Gibson#include <asm/thread_info.h>
363f639ee8SStephen Rothwell#include <asm/firmware.h>
3716a15a30SStephen Rothwell#include <asm/page_64.h>
38f9ff0f30SStephen Rothwell#include <asm/exception.h>
39945feb17SBenjamin Herrenschmidt#include <asm/irqflags.h>
4014cf11afSPaul Mackerras
4114cf11afSPaul Mackerras/*
4214cf11afSPaul Mackerras * We lay out physical memory as follows:
4314cf11afSPaul Mackerras * 0x0000 - 0x00ff : Secondary processor spin code
4414cf11afSPaul Mackerras * 0x0100 - 0x2fff : pSeries Interrupt prologs
4514cf11afSPaul Mackerras * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
4614cf11afSPaul Mackerras * 0x6000 - 0x6fff : Initial (CPU0) segment table
4714cf11afSPaul Mackerras * 0x7000 - 0x7fff : FWNMI data area
4814cf11afSPaul Mackerras * 0x8000 -        : Early init and support code
4914cf11afSPaul Mackerras */
5014cf11afSPaul Mackerras
5114cf11afSPaul Mackerras/*
5214cf11afSPaul Mackerras *   SPRG Usage
5314cf11afSPaul Mackerras *
5414cf11afSPaul Mackerras *   Register	Definition
5514cf11afSPaul Mackerras *
5614cf11afSPaul Mackerras *   SPRG0	reserved for hypervisor
5714cf11afSPaul Mackerras *   SPRG1	temp - used to save gpr
5814cf11afSPaul Mackerras *   SPRG2	temp - used to save gpr
5914cf11afSPaul Mackerras *   SPRG3	virt addr of paca
6014cf11afSPaul Mackerras */
6114cf11afSPaul Mackerras
6214cf11afSPaul Mackerras/*
6314cf11afSPaul Mackerras * Entering into this code we make the following assumptions:
6414cf11afSPaul Mackerras *  For pSeries:
6514cf11afSPaul Mackerras *   1. The MMU is off & open firmware is running in real mode.
6614cf11afSPaul Mackerras *   2. The kernel is entered at __start
6714cf11afSPaul Mackerras *
6814cf11afSPaul Mackerras *  For iSeries:
6914cf11afSPaul Mackerras *   1. The MMU is on (as it always is for iSeries)
7014cf11afSPaul Mackerras *   2. The kernel is entered at system_reset_iSeries
7114cf11afSPaul Mackerras */
7214cf11afSPaul Mackerras
7314cf11afSPaul Mackerras	.text
7414cf11afSPaul Mackerras	.globl  _stext
7514cf11afSPaul Mackerras_stext:
7614cf11afSPaul Mackerras_GLOBAL(__start)
7714cf11afSPaul Mackerras	/* NOP this out unconditionally */
7814cf11afSPaul MackerrasBEGIN_FTR_SECTION
7914cf11afSPaul Mackerras	b	.__start_initialization_multiplatform
8014cf11afSPaul MackerrasEND_FTR_SECTION(0, 1)
8114cf11afSPaul Mackerras
8214cf11afSPaul Mackerras	/* Catch branch to 0 in real mode */
8314cf11afSPaul Mackerras	trap
8414cf11afSPaul Mackerras
8514cf11afSPaul Mackerras	/* Secondary processors spin on this value until it goes to 1. */
8614cf11afSPaul Mackerras	.globl  __secondary_hold_spinloop
8714cf11afSPaul Mackerras__secondary_hold_spinloop:
8814cf11afSPaul Mackerras	.llong	0x0
8914cf11afSPaul Mackerras
9014cf11afSPaul Mackerras	/* Secondary processors write this value with their cpu # */
9114cf11afSPaul Mackerras	/* after they enter the spin loop immediately below.	  */
9214cf11afSPaul Mackerras	.globl	__secondary_hold_acknowledge
9314cf11afSPaul Mackerras__secondary_hold_acknowledge:
9414cf11afSPaul Mackerras	.llong	0x0
9514cf11afSPaul Mackerras
961dce0e30SMichael Ellerman#ifdef CONFIG_PPC_ISERIES
971dce0e30SMichael Ellerman	/*
981dce0e30SMichael Ellerman	 * At offset 0x20, there is a pointer to iSeries LPAR data.
991dce0e30SMichael Ellerman	 * This is required by the hypervisor
1001dce0e30SMichael Ellerman	 */
1011dce0e30SMichael Ellerman	. = 0x20
1021dce0e30SMichael Ellerman	.llong hvReleaseData-KERNELBASE
1031dce0e30SMichael Ellerman#endif /* CONFIG_PPC_ISERIES */
1041dce0e30SMichael Ellerman
10514cf11afSPaul Mackerras	. = 0x60
10614cf11afSPaul Mackerras/*
10775423b7bSGeoff Levand * The following code is used to hold secondary processors
10875423b7bSGeoff Levand * in a spin loop after they have entered the kernel, but
10914cf11afSPaul Mackerras * before the bulk of the kernel has been relocated.  This code
11014cf11afSPaul Mackerras * is relocated to physical address 0x60 before prom_init is run.
11114cf11afSPaul Mackerras * All of it must fit below the first exception vector at 0x100.
11214cf11afSPaul Mackerras */
11314cf11afSPaul Mackerras_GLOBAL(__secondary_hold)
11414cf11afSPaul Mackerras	mfmsr	r24
11514cf11afSPaul Mackerras	ori	r24,r24,MSR_RI
11614cf11afSPaul Mackerras	mtmsrd	r24			/* RI on */
11714cf11afSPaul Mackerras
118f1870f77SAnton Blanchard	/* Grab our physical cpu number */
11914cf11afSPaul Mackerras	mr	r24,r3
12014cf11afSPaul Mackerras
12114cf11afSPaul Mackerras	/* Tell the master cpu we're here */
12214cf11afSPaul Mackerras	/* Relocation is off & we are located at an address less */
12314cf11afSPaul Mackerras	/* than 0x100, so we only need to grab the low-order offset. */
12414cf11afSPaul Mackerras	std	r24,__secondary_hold_acknowledge@l(0)
12514cf11afSPaul Mackerras	sync
12614cf11afSPaul Mackerras
12714cf11afSPaul Mackerras	/* All secondary cpus wait here until told to start. */
12814cf11afSPaul Mackerras100:	ld	r4,__secondary_hold_spinloop@l(0)
12914cf11afSPaul Mackerras	cmpdi	0,r4,1
13014cf11afSPaul Mackerras	bne	100b
13114cf11afSPaul Mackerras
132f1870f77SAnton Blanchard#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
133f39b7a55SOlof Johansson	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
134758438a7SMichael Ellerman	mtctr	r4
13514cf11afSPaul Mackerras	mr	r3,r24
136758438a7SMichael Ellerman	bctr
13714cf11afSPaul Mackerras#else
13814cf11afSPaul Mackerras	BUG_OPCODE
13914cf11afSPaul Mackerras#endif
14014cf11afSPaul Mackerras
14114cf11afSPaul Mackerras/* This value is used to mark exception frames on the stack. */
14214cf11afSPaul Mackerras	.section ".toc","aw"
14314cf11afSPaul Mackerrasexception_marker:
14414cf11afSPaul Mackerras	.tc	ID_72656773_68657265[TC],0x7265677368657265
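	/* the marker value 0x7265677368657265 is ASCII for "regshere" */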
14514cf11afSPaul Mackerras	.text
14614cf11afSPaul Mackerras
14714cf11afSPaul Mackerras/*
14814cf11afSPaul Mackerras * This is the start of the interrupt handlers for pSeries
14914cf11afSPaul Mackerras * This code runs with relocation off.
15014cf11afSPaul Mackerras */
15114cf11afSPaul Mackerras	. = 0x100
15214cf11afSPaul Mackerras	.globl __start_interrupts
15314cf11afSPaul Mackerras__start_interrupts:
15414cf11afSPaul Mackerras
15514cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x100, system_reset)
15614cf11afSPaul Mackerras
15714cf11afSPaul Mackerras	. = 0x200
15814cf11afSPaul Mackerras_machine_check_pSeries:
15914cf11afSPaul Mackerras	HMT_MEDIUM
160b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13		/* save r13 */
16114cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
16214cf11afSPaul Mackerras
16314cf11afSPaul Mackerras	. = 0x300
16414cf11afSPaul Mackerras	.globl data_access_pSeries
16514cf11afSPaul Mackerrasdata_access_pSeries:
16614cf11afSPaul Mackerras	HMT_MEDIUM
167b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13
16814cf11afSPaul MackerrasBEGIN_FTR_SECTION
169b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG2,r12
170b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_DAR
171b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_DSISR
17214cf11afSPaul Mackerras	srdi	r13,r13,60
17314cf11afSPaul Mackerras	rlwimi	r13,r12,16,0x20
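	/* r13 now holds DAR>>60 with the DSISR segment-table-miss bit folded in as 0x20 */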
17414cf11afSPaul Mackerras	mfcr	r12
17514cf11afSPaul Mackerras	cmpwi	r13,0x2c
1763ccfc65cSPaul Mackerras	beq	do_stab_bolted_pSeries
17714cf11afSPaul Mackerras	mtcrf	0x80,r12
178b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SPRG2
17914cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
18014cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
18114cf11afSPaul Mackerras
18214cf11afSPaul Mackerras	. = 0x380
18314cf11afSPaul Mackerras	.globl data_access_slb_pSeries
18414cf11afSPaul Mackerrasdata_access_slb_pSeries:
18514cf11afSPaul Mackerras	HMT_MEDIUM
186b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13
187b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
1883c726f8dSBenjamin Herrenschmidt	std	r3,PACA_EXSLB+EX_R3(r13)
1893c726f8dSBenjamin Herrenschmidt	mfspr	r3,SPRN_DAR
19014cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
1913c726f8dSBenjamin Herrenschmidt	mfcr	r9
1923c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
1933c726f8dSBenjamin Herrenschmidt	/* Keep that around for when we re-implement dynamic VSIDs */
1943c726f8dSBenjamin Herrenschmidt	cmpdi	r3,0
1953c726f8dSBenjamin Herrenschmidt	bge	slb_miss_user_pseries
1963c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
19714cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_R10(r13)
19814cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_R11(r13)
19914cf11afSPaul Mackerras	std	r12,PACA_EXSLB+EX_R12(r13)
2003c726f8dSBenjamin Herrenschmidt	mfspr	r10,SPRN_SPRG1
2013c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXSLB+EX_R13(r13)
202b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SRR1		/* and SRR1 */
2033c726f8dSBenjamin Herrenschmidt	b	.slb_miss_realmode	/* Rel. branch works in real mode */
20414cf11afSPaul Mackerras
20514cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x400, instruction_access)
20614cf11afSPaul Mackerras
20714cf11afSPaul Mackerras	. = 0x480
20814cf11afSPaul Mackerras	.globl instruction_access_slb_pSeries
20914cf11afSPaul Mackerrasinstruction_access_slb_pSeries:
21014cf11afSPaul Mackerras	HMT_MEDIUM
211b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG1,r13
212b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
2133c726f8dSBenjamin Herrenschmidt	std	r3,PACA_EXSLB+EX_R3(r13)
2143c726f8dSBenjamin Herrenschmidt	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
21514cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
2163c726f8dSBenjamin Herrenschmidt	mfcr	r9
2173c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
2183c726f8dSBenjamin Herrenschmidt	/* Keep that around for when we re-implement dynamic VSIDs */
2193c726f8dSBenjamin Herrenschmidt	cmpdi	r3,0
2203c726f8dSBenjamin Herrenschmidt	bge	slb_miss_user_pseries
2213c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
22214cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_R10(r13)
22314cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_R11(r13)
22414cf11afSPaul Mackerras	std	r12,PACA_EXSLB+EX_R12(r13)
2253c726f8dSBenjamin Herrenschmidt	mfspr	r10,SPRN_SPRG1
2263c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXSLB+EX_R13(r13)
227b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SRR1		/* and SRR1 */
2283c726f8dSBenjamin Herrenschmidt	b	.slb_miss_realmode	/* Rel. branch works in real mode */
22914cf11afSPaul Mackerras
230d04c56f7SPaul Mackerras	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
23114cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x600, alignment)
23214cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x700, program_check)
23314cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
234d04c56f7SPaul Mackerras	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
23514cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
23614cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
23714cf11afSPaul Mackerras
23814cf11afSPaul Mackerras	. = 0xc00
23914cf11afSPaul Mackerras	.globl	system_call_pSeries
24014cf11afSPaul Mackerrassystem_call_pSeries:
24114cf11afSPaul Mackerras	HMT_MEDIUM
242745a14ccSPaul MackerrasBEGIN_FTR_SECTION
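	/* syscall number 0x1ebe requests the fast LE/BE switch handled at 1: below */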
243745a14ccSPaul Mackerras	cmpdi	r0,0x1ebe
244745a14ccSPaul Mackerras	beq-	1f
245745a14ccSPaul MackerrasEND_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
24614cf11afSPaul Mackerras	mr	r9,r13
24714cf11afSPaul Mackerras	mfmsr	r10
248b5bbeb23SPaul Mackerras	mfspr	r13,SPRN_SPRG3
249b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_SRR0
25014cf11afSPaul Mackerras	clrrdi	r12,r13,32
25114cf11afSPaul Mackerras	oris	r12,r12,system_call_common@h
25214cf11afSPaul Mackerras	ori	r12,r12,system_call_common@l
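	/* r12 = virtual address of system_call_common (kernel base taken from the top half of the paca pointer) */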
253b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r12
25414cf11afSPaul Mackerras	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
255b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SRR1
256b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r10
25714cf11afSPaul Mackerras	rfid
25814cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
25914cf11afSPaul Mackerras
260745a14ccSPaul Mackerras/* Fast LE/BE switch system call */
261745a14ccSPaul Mackerras1:	mfspr	r12,SPRN_SRR1
262745a14ccSPaul Mackerras	xori	r12,r12,MSR_LE
263745a14ccSPaul Mackerras	mtspr	SPRN_SRR1,r12
264745a14ccSPaul Mackerras	rfid		/* return to userspace */
265745a14ccSPaul Mackerras	b	.
266745a14ccSPaul Mackerras
26714cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xd00, single_step)
26814cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
26914cf11afSPaul Mackerras
27014cf11afSPaul Mackerras	/* We need to deal with the Altivec unavailable exception
27114cf11afSPaul Mackerras	 * here, which is at 0xf20 and thus lands in the middle of the
27214cf11afSPaul Mackerras	 * PerformanceMonitor prolog code. A little trickery is
27314cf11afSPaul Mackerras	 * therefore necessary
27414cf11afSPaul Mackerras	 */
27514cf11afSPaul Mackerras	. = 0xf00
27614cf11afSPaul Mackerras	b	performance_monitor_pSeries
27714cf11afSPaul Mackerras
27810e34392SMichael Neuling	. = 0xf20
27910e34392SMichael Neuling	b	altivec_unavailable_pSeries
28014cf11afSPaul Mackerras
281ce48b210SMichael Neuling	. = 0xf40
282ce48b210SMichael Neuling	b	vsx_unavailable_pSeries
283ce48b210SMichael Neuling
284acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
285acf7d768SBenjamin Herrenschmidt	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
286acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
28714cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
288acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
289acf7d768SBenjamin Herrenschmidt	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
290acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
29114cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
292acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
293acf7d768SBenjamin Herrenschmidt	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
294acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
29514cf11afSPaul Mackerras
29614cf11afSPaul Mackerras	. = 0x3000
29714cf11afSPaul Mackerras
29814cf11afSPaul Mackerras/*** pSeries interrupt support ***/
29914cf11afSPaul Mackerras
30014cf11afSPaul Mackerras	/* moved from 0xf00 */
301449d846dSLivio Soares	STD_EXCEPTION_PSERIES(., performance_monitor)
30210e34392SMichael Neuling	STD_EXCEPTION_PSERIES(., altivec_unavailable)
303ce48b210SMichael Neuling	STD_EXCEPTION_PSERIES(., vsx_unavailable)
304d04c56f7SPaul Mackerras
305d04c56f7SPaul Mackerras/*
306d04c56f7SPaul Mackerras * An interrupt came in while soft-disabled; clear EE in SRR1,
307d04c56f7SPaul Mackerras * clear paca->hard_enabled and return.
308d04c56f7SPaul Mackerras */
309d04c56f7SPaul Mackerrasmasked_interrupt:
310d04c56f7SPaul Mackerras	stb	r10,PACAHARDIRQEN(r13)
311d04c56f7SPaul Mackerras	mtcrf	0x80,r9
312d04c56f7SPaul Mackerras	ld	r9,PACA_EXGEN+EX_R9(r13)
313d04c56f7SPaul Mackerras	mfspr	r10,SPRN_SRR1
314d04c56f7SPaul Mackerras	rldicl	r10,r10,48,1		/* clear MSR_EE */
315d04c56f7SPaul Mackerras	rotldi	r10,r10,16
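	/* the two rotates total 64 bits, so every other SRR1 bit ends up back in place */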
316d04c56f7SPaul Mackerras	mtspr	SPRN_SRR1,r10
317d04c56f7SPaul Mackerras	ld	r10,PACA_EXGEN+EX_R10(r13)
318d04c56f7SPaul Mackerras	mfspr	r13,SPRN_SPRG1
319d04c56f7SPaul Mackerras	rfid
320d04c56f7SPaul Mackerras	b	.
32114cf11afSPaul Mackerras
32214cf11afSPaul Mackerras	.align	7
3233ccfc65cSPaul Mackerrasdo_stab_bolted_pSeries:
32414cf11afSPaul Mackerras	mtcrf	0x80,r12
325b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_SPRG2
32614cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
32714cf11afSPaul Mackerras
3289a955167SPaul Mackerras#ifdef CONFIG_PPC_PSERIES
32914cf11afSPaul Mackerras/*
3309a955167SPaul Mackerras * Vectors for the FWNMI option.  Share common code.
3319a955167SPaul Mackerras */
3329a955167SPaul Mackerras	.globl system_reset_fwnmi
3339a955167SPaul Mackerras	.align	7
3349a955167SPaul Mackerrassystem_reset_fwnmi:
3359a955167SPaul Mackerras	HMT_MEDIUM
3369a955167SPaul Mackerras	mtspr	SPRN_SPRG1,r13		/* save r13 */
3379a955167SPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
3389a955167SPaul Mackerras
3399a955167SPaul Mackerras	.globl machine_check_fwnmi
3409a955167SPaul Mackerras	.align	7
3419a955167SPaul Mackerrasmachine_check_fwnmi:
3429a955167SPaul Mackerras	HMT_MEDIUM
3439a955167SPaul Mackerras	mtspr	SPRN_SPRG1,r13		/* save r13 */
3449a955167SPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
3459a955167SPaul Mackerras
3469a955167SPaul Mackerras#endif /* CONFIG_PPC_PSERIES */
3479a955167SPaul Mackerras
3489a955167SPaul Mackerras#ifdef __DISABLED__
3499a955167SPaul Mackerras/*
3503c726f8dSBenjamin Herrenschmidt * This is used when the SLB miss handler has to go virtual,
3513c726f8dSBenjamin Herrenschmidt * which doesn't happen at the moment but will once we re-implement
3523c726f8dSBenjamin Herrenschmidt * dynamic VSIDs for shared page tables
3533c726f8dSBenjamin Herrenschmidt */
3543c726f8dSBenjamin Herrenschmidtslb_miss_user_pseries:
3553c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXGEN+EX_R10(r13)
3563c726f8dSBenjamin Herrenschmidt	std	r11,PACA_EXGEN+EX_R11(r13)
3573c726f8dSBenjamin Herrenschmidt	std	r12,PACA_EXGEN+EX_R12(r13)
3583c726f8dSBenjamin Herrenschmidt	mfspr	r10,SPRG1
3593c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXSLB+EX_R9(r13)
3603c726f8dSBenjamin Herrenschmidt	ld	r12,PACA_EXSLB+EX_R3(r13)
3613c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXGEN+EX_R13(r13)
3623c726f8dSBenjamin Herrenschmidt	std	r11,PACA_EXGEN+EX_R9(r13)
3633c726f8dSBenjamin Herrenschmidt	std	r12,PACA_EXGEN+EX_R3(r13)
3643c726f8dSBenjamin Herrenschmidt	clrrdi	r12,r13,32
3653c726f8dSBenjamin Herrenschmidt	mfmsr	r10
3663c726f8dSBenjamin Herrenschmidt	mfspr	r11,SRR0			/* save SRR0 */
3673c726f8dSBenjamin Herrenschmidt	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
3683c726f8dSBenjamin Herrenschmidt	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
3693c726f8dSBenjamin Herrenschmidt	mtspr	SRR0,r12
3703c726f8dSBenjamin Herrenschmidt	mfspr	r12,SRR1			/* and SRR1 */
3713c726f8dSBenjamin Herrenschmidt	mtspr	SRR1,r10
3723c726f8dSBenjamin Herrenschmidt	rfid
3733c726f8dSBenjamin Herrenschmidt	b	.				/* prevent spec. execution */
3743c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
3753c726f8dSBenjamin Herrenschmidt
3769a955167SPaul Mackerras	.align	7
3779a955167SPaul Mackerras	.globl	__end_interrupts
3789a955167SPaul Mackerras__end_interrupts:
3799a955167SPaul Mackerras
3803c726f8dSBenjamin Herrenschmidt/*
3819a955167SPaul Mackerras * Code from here down to __end_handlers is invoked from the
3829a955167SPaul Mackerras * exception prologs above.
38314cf11afSPaul Mackerras */
3849e4859efSStephen Rothwell
38514cf11afSPaul Mackerras/*** Common interrupt handlers ***/
38614cf11afSPaul Mackerras
38714cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
38814cf11afSPaul Mackerras
38914cf11afSPaul Mackerras	/*
39014cf11afSPaul Mackerras	 * Machine check is different because we use a different
39114cf11afSPaul Mackerras	 * save area: PACA_EXMC instead of PACA_EXGEN.
39214cf11afSPaul Mackerras	 */
39314cf11afSPaul Mackerras	.align	7
39414cf11afSPaul Mackerras	.globl machine_check_common
39514cf11afSPaul Mackerrasmachine_check_common:
39614cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
397f39224a8SPaul Mackerras	FINISH_NAP
39814cf11afSPaul Mackerras	DISABLE_INTS
39914cf11afSPaul Mackerras	bl	.save_nvgprs
40014cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
40114cf11afSPaul Mackerras	bl	.machine_check_exception
40214cf11afSPaul Mackerras	b	.ret_from_except
40314cf11afSPaul Mackerras
40414cf11afSPaul Mackerras	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
40514cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
40614cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
40714cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
40814cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
409f39224a8SPaul Mackerras	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
41014cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
41114cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
41214cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
41314cf11afSPaul Mackerras#else
41414cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
41514cf11afSPaul Mackerras#endif
416acf7d768SBenjamin Herrenschmidt#ifdef CONFIG_CBE_RAS
417acf7d768SBenjamin Herrenschmidt	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
418acf7d768SBenjamin Herrenschmidt	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
419acf7d768SBenjamin Herrenschmidt	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
420acf7d768SBenjamin Herrenschmidt#endif /* CONFIG_CBE_RAS */
42114cf11afSPaul Mackerras
42214cf11afSPaul Mackerras/*
42314cf11afSPaul Mackerras * Here we have detected that the kernel stack pointer is bad.
42414cf11afSPaul Mackerras * R9 contains the saved CR, r13 points to the paca,
42514cf11afSPaul Mackerras * r10 contains the (bad) kernel stack pointer,
42614cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
42714cf11afSPaul Mackerras * We switch to using an emergency stack, save the registers there,
42814cf11afSPaul Mackerras * and call kernel_bad_stack(), which panics.
42914cf11afSPaul Mackerras */
43014cf11afSPaul Mackerrasbad_stack:
43114cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
43214cf11afSPaul Mackerras	subi	r1,r1,64+INT_FRAME_SIZE
43314cf11afSPaul Mackerras	std	r9,_CCR(r1)
43414cf11afSPaul Mackerras	std	r10,GPR1(r1)
43514cf11afSPaul Mackerras	std	r11,_NIP(r1)
43614cf11afSPaul Mackerras	std	r12,_MSR(r1)
437b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR
438b5bbeb23SPaul Mackerras	mfspr	r12,SPRN_DSISR
43914cf11afSPaul Mackerras	std	r11,_DAR(r1)
44014cf11afSPaul Mackerras	std	r12,_DSISR(r1)
44114cf11afSPaul Mackerras	mflr	r10
44214cf11afSPaul Mackerras	mfctr	r11
44314cf11afSPaul Mackerras	mfxer	r12
44414cf11afSPaul Mackerras	std	r10,_LINK(r1)
44514cf11afSPaul Mackerras	std	r11,_CTR(r1)
44614cf11afSPaul Mackerras	std	r12,_XER(r1)
44714cf11afSPaul Mackerras	SAVE_GPR(0,r1)
44814cf11afSPaul Mackerras	SAVE_GPR(2,r1)
44914cf11afSPaul Mackerras	SAVE_4GPRS(3,r1)
45014cf11afSPaul Mackerras	SAVE_2GPRS(7,r1)
45114cf11afSPaul Mackerras	SAVE_10GPRS(12,r1)
45214cf11afSPaul Mackerras	SAVE_10GPRS(22,r1)
45368730401SOlof Johansson	lhz	r12,PACA_TRAP_SAVE(r13)
45468730401SOlof Johansson	std	r12,_TRAP(r1)
45514cf11afSPaul Mackerras	addi	r11,r1,INT_FRAME_SIZE
45614cf11afSPaul Mackerras	std	r11,0(r1)
45714cf11afSPaul Mackerras	li	r12,0
45814cf11afSPaul Mackerras	std	r12,0(r11)
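	/* zero the back chain in the top frame; the old stack pointer was bad, so stack walks stop here */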
45914cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
46014cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
46114cf11afSPaul Mackerras	bl	.kernel_bad_stack
46214cf11afSPaul Mackerras	b	1b
46314cf11afSPaul Mackerras
46414cf11afSPaul Mackerras/*
46514cf11afSPaul Mackerras * Here r13 points to the paca, r9 contains the saved CR,
46614cf11afSPaul Mackerras * SRR0 and SRR1 are saved in r11 and r12,
46714cf11afSPaul Mackerras * r9 - r13 are saved in paca->exgen.
46814cf11afSPaul Mackerras */
46914cf11afSPaul Mackerras	.align	7
47014cf11afSPaul Mackerras	.globl data_access_common
47114cf11afSPaul Mackerrasdata_access_common:
472b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DAR
47314cf11afSPaul Mackerras	std	r10,PACA_EXGEN+EX_DAR(r13)
474b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DSISR
47514cf11afSPaul Mackerras	stw	r10,PACA_EXGEN+EX_DSISR(r13)
47614cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
47714cf11afSPaul Mackerras	ld	r3,PACA_EXGEN+EX_DAR(r13)
47814cf11afSPaul Mackerras	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
47914cf11afSPaul Mackerras	li	r5,0x300
48014cf11afSPaul Mackerras	b	.do_hash_page	 	/* Try to handle as hpte fault */
48114cf11afSPaul Mackerras
48214cf11afSPaul Mackerras	.align	7
48314cf11afSPaul Mackerras	.globl instruction_access_common
48414cf11afSPaul Mackerrasinstruction_access_common:
48514cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
48614cf11afSPaul Mackerras	ld	r3,_NIP(r1)
48714cf11afSPaul Mackerras	andis.	r4,r12,0x5820
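	/* the SRR1 status bits stand in for DSISR on an instruction fault */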
48814cf11afSPaul Mackerras	li	r5,0x400
48914cf11afSPaul Mackerras	b	.do_hash_page		/* Try to handle as hpte fault */
49014cf11afSPaul Mackerras
4913c726f8dSBenjamin Herrenschmidt/*
4923c726f8dSBenjamin Herrenschmidt * Here is the common handler for user SLB misses that is used when going
4933c726f8dSBenjamin Herrenschmidt * to virtual mode for SLB misses; it is currently unused
4943c726f8dSBenjamin Herrenschmidt */
4953c726f8dSBenjamin Herrenschmidt#ifdef __DISABLED__
4963c726f8dSBenjamin Herrenschmidt	.align	7
4973c726f8dSBenjamin Herrenschmidt	.globl	slb_miss_user_common
4983c726f8dSBenjamin Herrenschmidtslb_miss_user_common:
4993c726f8dSBenjamin Herrenschmidt	mflr	r10
5003c726f8dSBenjamin Herrenschmidt	std	r3,PACA_EXGEN+EX_DAR(r13)
5013c726f8dSBenjamin Herrenschmidt	stw	r9,PACA_EXGEN+EX_CCR(r13)
5023c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXGEN+EX_LR(r13)
5033c726f8dSBenjamin Herrenschmidt	std	r11,PACA_EXGEN+EX_SRR0(r13)
5043c726f8dSBenjamin Herrenschmidt	bl	.slb_allocate_user
5053c726f8dSBenjamin Herrenschmidt
5063c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXGEN+EX_LR(r13)
5073c726f8dSBenjamin Herrenschmidt	ld	r3,PACA_EXGEN+EX_R3(r13)
5083c726f8dSBenjamin Herrenschmidt	lwz	r9,PACA_EXGEN+EX_CCR(r13)
5093c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXGEN+EX_SRR0(r13)
5103c726f8dSBenjamin Herrenschmidt	mtlr	r10
5113c726f8dSBenjamin Herrenschmidt	beq-	slb_miss_fault
5123c726f8dSBenjamin Herrenschmidt
5133c726f8dSBenjamin Herrenschmidt	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
5143c726f8dSBenjamin Herrenschmidt	beq-	unrecov_user_slb
5153c726f8dSBenjamin Herrenschmidt	mfmsr	r10
5163c726f8dSBenjamin Herrenschmidt
5173c726f8dSBenjamin Herrenschmidt.machine push
5183c726f8dSBenjamin Herrenschmidt.machine "power4"
5193c726f8dSBenjamin Herrenschmidt	mtcrf	0x80,r9
5203c726f8dSBenjamin Herrenschmidt.machine pop
5213c726f8dSBenjamin Herrenschmidt
5223c726f8dSBenjamin Herrenschmidt	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
5233c726f8dSBenjamin Herrenschmidt	mtmsrd	r10,1
5243c726f8dSBenjamin Herrenschmidt
5253c726f8dSBenjamin Herrenschmidt	mtspr	SRR0,r11
5263c726f8dSBenjamin Herrenschmidt	mtspr	SRR1,r12
5273c726f8dSBenjamin Herrenschmidt
5283c726f8dSBenjamin Herrenschmidt	ld	r9,PACA_EXGEN+EX_R9(r13)
5293c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXGEN+EX_R10(r13)
5303c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXGEN+EX_R11(r13)
5313c726f8dSBenjamin Herrenschmidt	ld	r12,PACA_EXGEN+EX_R12(r13)
5323c726f8dSBenjamin Herrenschmidt	ld	r13,PACA_EXGEN+EX_R13(r13)
5333c726f8dSBenjamin Herrenschmidt	rfid
5343c726f8dSBenjamin Herrenschmidt	b	.
5353c726f8dSBenjamin Herrenschmidt
5363c726f8dSBenjamin Herrenschmidtslb_miss_fault:
5373c726f8dSBenjamin Herrenschmidt	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
5383c726f8dSBenjamin Herrenschmidt	ld	r4,PACA_EXGEN+EX_DAR(r13)
5393c726f8dSBenjamin Herrenschmidt	li	r5,0
5403c726f8dSBenjamin Herrenschmidt	std	r4,_DAR(r1)
5413c726f8dSBenjamin Herrenschmidt	std	r5,_DSISR(r1)
5423ccfc65cSPaul Mackerras	b	handle_page_fault
5433c726f8dSBenjamin Herrenschmidt
5443c726f8dSBenjamin Herrenschmidtunrecov_user_slb:
5453c726f8dSBenjamin Herrenschmidt	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
5463c726f8dSBenjamin Herrenschmidt	DISABLE_INTS
5473c726f8dSBenjamin Herrenschmidt	bl	.save_nvgprs
5483c726f8dSBenjamin Herrenschmidt1:	addi	r3,r1,STACK_FRAME_OVERHEAD
5493c726f8dSBenjamin Herrenschmidt	bl	.unrecoverable_exception
5503c726f8dSBenjamin Herrenschmidt	b	1b
5513c726f8dSBenjamin Herrenschmidt
5523c726f8dSBenjamin Herrenschmidt#endif /* __DISABLED__ */
5533c726f8dSBenjamin Herrenschmidt
5543c726f8dSBenjamin Herrenschmidt
5553c726f8dSBenjamin Herrenschmidt/*
5563c726f8dSBenjamin Herrenschmidt * r13 points to the PACA, r9 contains the saved CR,
5573c726f8dSBenjamin Herrenschmidt * r12 contains the saved SRR1; SRR0 is still ready for return
5583c726f8dSBenjamin Herrenschmidt * r3 has the faulting address
5593c726f8dSBenjamin Herrenschmidt * r9 - r13 are saved in paca->exslb.
5603c726f8dSBenjamin Herrenschmidt * r3 is saved in paca->slb_r3
5613c726f8dSBenjamin Herrenschmidt * We assume we aren't going to take any exceptions during this procedure.
5623c726f8dSBenjamin Herrenschmidt */
5633c726f8dSBenjamin Herrenschmidt_GLOBAL(slb_miss_realmode)
5643c726f8dSBenjamin Herrenschmidt	mflr	r10
5653c726f8dSBenjamin Herrenschmidt
5663c726f8dSBenjamin Herrenschmidt	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
5673c726f8dSBenjamin Herrenschmidt	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
5683c726f8dSBenjamin Herrenschmidt
5693c726f8dSBenjamin Herrenschmidt	bl	.slb_allocate_realmode
5703c726f8dSBenjamin Herrenschmidt
5713c726f8dSBenjamin Herrenschmidt	/* All done -- return from exception. */
5723c726f8dSBenjamin Herrenschmidt
5733c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXSLB+EX_LR(r13)
5743c726f8dSBenjamin Herrenschmidt	ld	r3,PACA_EXSLB+EX_R3(r13)
5753c726f8dSBenjamin Herrenschmidt	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
5763c726f8dSBenjamin Herrenschmidt#ifdef CONFIG_PPC_ISERIES
5773f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
5783356bb9fSDavid Gibson	ld	r11,PACALPPACAPTR(r13)
5793356bb9fSDavid Gibson	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
5803f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
5813c726f8dSBenjamin Herrenschmidt#endif /* CONFIG_PPC_ISERIES */
5823c726f8dSBenjamin Herrenschmidt
5833c726f8dSBenjamin Herrenschmidt	mtlr	r10
5843c726f8dSBenjamin Herrenschmidt
5853c726f8dSBenjamin Herrenschmidt	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
586320787c7SPaul Mackerras	beq-	2f
5873c726f8dSBenjamin Herrenschmidt
5883c726f8dSBenjamin Herrenschmidt.machine	push
5893c726f8dSBenjamin Herrenschmidt.machine	"power4"
5903c726f8dSBenjamin Herrenschmidt	mtcrf	0x80,r9
5913c726f8dSBenjamin Herrenschmidt	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
5923c726f8dSBenjamin Herrenschmidt.machine	pop
5933c726f8dSBenjamin Herrenschmidt
5943c726f8dSBenjamin Herrenschmidt#ifdef CONFIG_PPC_ISERIES
5953f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
5963c726f8dSBenjamin Herrenschmidt	mtspr	SPRN_SRR0,r11
5973c726f8dSBenjamin Herrenschmidt	mtspr	SPRN_SRR1,r12
5983f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
5993c726f8dSBenjamin Herrenschmidt#endif /* CONFIG_PPC_ISERIES */
6003c726f8dSBenjamin Herrenschmidt	ld	r9,PACA_EXSLB+EX_R9(r13)
6013c726f8dSBenjamin Herrenschmidt	ld	r10,PACA_EXSLB+EX_R10(r13)
6023c726f8dSBenjamin Herrenschmidt	ld	r11,PACA_EXSLB+EX_R11(r13)
6033c726f8dSBenjamin Herrenschmidt	ld	r12,PACA_EXSLB+EX_R12(r13)
6043c726f8dSBenjamin Herrenschmidt	ld	r13,PACA_EXSLB+EX_R13(r13)
6053c726f8dSBenjamin Herrenschmidt	rfid
6063c726f8dSBenjamin Herrenschmidt	b	.	/* prevent speculative execution */
6073c726f8dSBenjamin Herrenschmidt
608320787c7SPaul Mackerras2:
609320787c7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
610320787c7SPaul MackerrasBEGIN_FW_FTR_SECTION
611320787c7SPaul Mackerras	b	unrecov_slb
612320787c7SPaul MackerrasEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
613320787c7SPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
614320787c7SPaul Mackerras	mfspr	r11,SPRN_SRR0
615320787c7SPaul Mackerras	clrrdi	r10,r13,32
616320787c7SPaul Mackerras	LOAD_HANDLER(r10,unrecov_slb)
617320787c7SPaul Mackerras	mtspr	SPRN_SRR0,r10
618320787c7SPaul Mackerras	mfmsr	r10
619320787c7SPaul Mackerras	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
620320787c7SPaul Mackerras	mtspr	SPRN_SRR1,r10
621320787c7SPaul Mackerras	rfid
622320787c7SPaul Mackerras	b	.
623320787c7SPaul Mackerras
6243c726f8dSBenjamin Herrenschmidtunrecov_slb:
6253c726f8dSBenjamin Herrenschmidt	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
6263c726f8dSBenjamin Herrenschmidt	DISABLE_INTS
6273c726f8dSBenjamin Herrenschmidt	bl	.save_nvgprs
6283c726f8dSBenjamin Herrenschmidt1:	addi	r3,r1,STACK_FRAME_OVERHEAD
6293c726f8dSBenjamin Herrenschmidt	bl	.unrecoverable_exception
6303c726f8dSBenjamin Herrenschmidt	b	1b
6313c726f8dSBenjamin Herrenschmidt
63214cf11afSPaul Mackerras	.align	7
63314cf11afSPaul Mackerras	.globl hardware_interrupt_common
63414cf11afSPaul Mackerras	.globl hardware_interrupt_entry
63514cf11afSPaul Mackerrashardware_interrupt_common:
63614cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
637f39224a8SPaul Mackerras	FINISH_NAP
63814cf11afSPaul Mackerrashardware_interrupt_entry:
63914cf11afSPaul Mackerras	DISABLE_INTS
640a416561bSOlof JohanssonBEGIN_FTR_SECTION
641cb2c9b27SAnton Blanchard	bl	.ppc64_runlatch_on
642a416561bSOlof JohanssonEND_FTR_SECTION_IFSET(CPU_FTR_CTRL)
64314cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
64414cf11afSPaul Mackerras	bl	.do_IRQ
64514cf11afSPaul Mackerras	b	.ret_from_except_lite
64614cf11afSPaul Mackerras
647f39224a8SPaul Mackerras#ifdef CONFIG_PPC_970_NAP
648f39224a8SPaul Mackerraspower4_fixup_nap:
649f39224a8SPaul Mackerras	andc	r9,r9,r10
650f39224a8SPaul Mackerras	std	r9,TI_LOCAL_FLAGS(r11)
651f39224a8SPaul Mackerras	ld	r10,_LINK(r1)		/* make idle task do the */
652f39224a8SPaul Mackerras	std	r10,_NIP(r1)		/* equivalent of a blr */
653f39224a8SPaul Mackerras	blr
654f39224a8SPaul Mackerras#endif
655f39224a8SPaul Mackerras
65614cf11afSPaul Mackerras	.align	7
65714cf11afSPaul Mackerras	.globl alignment_common
65814cf11afSPaul Mackerrasalignment_common:
659b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DAR
66014cf11afSPaul Mackerras	std	r10,PACA_EXGEN+EX_DAR(r13)
661b5bbeb23SPaul Mackerras	mfspr	r10,SPRN_DSISR
66214cf11afSPaul Mackerras	stw	r10,PACA_EXGEN+EX_DSISR(r13)
66314cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
66414cf11afSPaul Mackerras	ld	r3,PACA_EXGEN+EX_DAR(r13)
66514cf11afSPaul Mackerras	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
66614cf11afSPaul Mackerras	std	r3,_DAR(r1)
66714cf11afSPaul Mackerras	std	r4,_DSISR(r1)
66814cf11afSPaul Mackerras	bl	.save_nvgprs
66914cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
67014cf11afSPaul Mackerras	ENABLE_INTS
67114cf11afSPaul Mackerras	bl	.alignment_exception
67214cf11afSPaul Mackerras	b	.ret_from_except
67314cf11afSPaul Mackerras
67414cf11afSPaul Mackerras	.align	7
67514cf11afSPaul Mackerras	.globl program_check_common
67614cf11afSPaul Mackerrasprogram_check_common:
67714cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
67814cf11afSPaul Mackerras	bl	.save_nvgprs
67914cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
68014cf11afSPaul Mackerras	ENABLE_INTS
68114cf11afSPaul Mackerras	bl	.program_check_exception
68214cf11afSPaul Mackerras	b	.ret_from_except
68314cf11afSPaul Mackerras
68414cf11afSPaul Mackerras	.align	7
68514cf11afSPaul Mackerras	.globl fp_unavailable_common
68614cf11afSPaul Mackerrasfp_unavailable_common:
68714cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
6883ccfc65cSPaul Mackerras	bne	1f			/* if from user, just load it up */
68914cf11afSPaul Mackerras	bl	.save_nvgprs
69014cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
69114cf11afSPaul Mackerras	ENABLE_INTS
69214cf11afSPaul Mackerras	bl	.kernel_fp_unavailable_exception
69314cf11afSPaul Mackerras	BUG_OPCODE
6946f3d8e69SMichael Neuling1:	bl	.load_up_fpu
6956f3d8e69SMichael Neuling	b	fast_exception_return
69614cf11afSPaul Mackerras
69714cf11afSPaul Mackerras	.align	7
69814cf11afSPaul Mackerras	.globl altivec_unavailable_common
69914cf11afSPaul Mackerrasaltivec_unavailable_common:
70014cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
70114cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
70214cf11afSPaul MackerrasBEGIN_FTR_SECTION
7036f3d8e69SMichael Neuling	beq	1f
7046f3d8e69SMichael Neuling	bl	.load_up_altivec
7056f3d8e69SMichael Neuling	b	fast_exception_return
7066f3d8e69SMichael Neuling1:
70714cf11afSPaul MackerrasEND_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
70814cf11afSPaul Mackerras#endif
70914cf11afSPaul Mackerras	bl	.save_nvgprs
71014cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
71114cf11afSPaul Mackerras	ENABLE_INTS
71214cf11afSPaul Mackerras	bl	.altivec_unavailable_exception
71314cf11afSPaul Mackerras	b	.ret_from_except
71414cf11afSPaul Mackerras
7159a955167SPaul Mackerras	.align	7
7169a955167SPaul Mackerras	.globl vsx_unavailable_common
7179a955167SPaul Mackerrasvsx_unavailable_common:
7189a955167SPaul Mackerras	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
7199a955167SPaul Mackerras#ifdef CONFIG_VSX
7209a955167SPaul MackerrasBEGIN_FTR_SECTION
7219a955167SPaul Mackerras	bne	.load_up_vsx
7229a955167SPaul Mackerras1:
7239a955167SPaul MackerrasEND_FTR_SECTION_IFSET(CPU_FTR_VSX)
7249a955167SPaul Mackerras#endif
7259a955167SPaul Mackerras	bl	.save_nvgprs
7269a955167SPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
7279a955167SPaul Mackerras	ENABLE_INTS
7289a955167SPaul Mackerras	bl	.vsx_unavailable_exception
7299a955167SPaul Mackerras	b	.ret_from_except
7309a955167SPaul Mackerras
7319a955167SPaul Mackerras	.align	7
7329a955167SPaul Mackerras	.globl	__end_handlers
7339a955167SPaul Mackerras__end_handlers:
7349a955167SPaul Mackerras
7359a955167SPaul Mackerras/*
7369a955167SPaul Mackerras * Return from an exception with minimal checks.
7379a955167SPaul Mackerras * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
7389a955167SPaul Mackerras * If interrupts have been enabled, or anything has been
7399a955167SPaul Mackerras * done that might have changed the scheduling status of
7409a955167SPaul Mackerras * any task or sent any task a signal, you should use
7419a955167SPaul Mackerras * ret_from_except or ret_from_except_lite instead of this.
7429a955167SPaul Mackerras */
7439a955167SPaul Mackerrasfast_exc_return_irq:			/* restores irq state too */
7449a955167SPaul Mackerras	ld	r3,SOFTE(r1)
7459a955167SPaul Mackerras	TRACE_AND_RESTORE_IRQ(r3);
7469a955167SPaul Mackerras	ld	r12,_MSR(r1)
7479a955167SPaul Mackerras	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
7489a955167SPaul Mackerras	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
7499a955167SPaul Mackerras	b	1f
7509a955167SPaul Mackerras
7519a955167SPaul Mackerras	.globl	fast_exception_return
7529a955167SPaul Mackerrasfast_exception_return:
7539a955167SPaul Mackerras	ld	r12,_MSR(r1)
7549a955167SPaul Mackerras1:	ld	r11,_NIP(r1)
7559a955167SPaul Mackerras	andi.	r3,r12,MSR_RI		/* check if RI is set */
7569a955167SPaul Mackerras	beq-	unrecov_fer
7579a955167SPaul Mackerras
7589a955167SPaul Mackerras#ifdef CONFIG_VIRT_CPU_ACCOUNTING
7599a955167SPaul Mackerras	andi.	r3,r12,MSR_PR
7609a955167SPaul Mackerras	beq	2f
7619a955167SPaul Mackerras	ACCOUNT_CPU_USER_EXIT(r3, r4)
7629a955167SPaul Mackerras2:
7639a955167SPaul Mackerras#endif
7649a955167SPaul Mackerras
7659a955167SPaul Mackerras	ld	r3,_CCR(r1)
7669a955167SPaul Mackerras	ld	r4,_LINK(r1)
7679a955167SPaul Mackerras	ld	r5,_CTR(r1)
7689a955167SPaul Mackerras	ld	r6,_XER(r1)
7699a955167SPaul Mackerras	mtcr	r3
7709a955167SPaul Mackerras	mtlr	r4
7719a955167SPaul Mackerras	mtctr	r5
7729a955167SPaul Mackerras	mtxer	r6
7739a955167SPaul Mackerras	REST_GPR(0, r1)
7749a955167SPaul Mackerras	REST_8GPRS(2, r1)
7759a955167SPaul Mackerras
7769a955167SPaul Mackerras	mfmsr	r10
7779a955167SPaul Mackerras	rldicl	r10,r10,48,1		/* clear EE */
7789a955167SPaul Mackerras	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
7799a955167SPaul Mackerras	mtmsrd	r10,1
7809a955167SPaul Mackerras
7819a955167SPaul Mackerras	mtspr	SPRN_SRR1,r12
7829a955167SPaul Mackerras	mtspr	SPRN_SRR0,r11
7839a955167SPaul Mackerras	REST_4GPRS(10, r1)
7849a955167SPaul Mackerras	ld	r1,GPR1(r1)
7859a955167SPaul Mackerras	rfid
7869a955167SPaul Mackerras	b	.	/* prevent speculative execution */
7879a955167SPaul Mackerras
7889a955167SPaul Mackerrasunrecov_fer:
7899a955167SPaul Mackerras	bl	.save_nvgprs
7909a955167SPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
7919a955167SPaul Mackerras	bl	.unrecoverable_exception
7929a955167SPaul Mackerras	b	1b
7939a955167SPaul Mackerras
79414cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
79514cf11afSPaul Mackerras/*
79614cf11afSPaul Mackerras * load_up_altivec(unused, unused, tsk)
79714cf11afSPaul Mackerras * Disable VMX for the task which had it previously,
79814cf11afSPaul Mackerras * and save its vector registers in its thread_struct.
79914cf11afSPaul Mackerras * Enables the VMX for use in the kernel on return.
80014cf11afSPaul Mackerras * On SMP we know the VMX is free, since we give it up every
80114cf11afSPaul Mackerras * switch (ie, no lazy save of the vector registers).
80214cf11afSPaul Mackerras * On entry: r13 == 'current' && last_task_used_altivec != 'current'
80314cf11afSPaul Mackerras */
80414cf11afSPaul Mackerras_STATIC(load_up_altivec)
80514cf11afSPaul Mackerras	mfmsr	r5			/* grab the current MSR */
80614cf11afSPaul Mackerras	oris	r5,r5,MSR_VEC@h
80714cf11afSPaul Mackerras	mtmsrd	r5			/* enable use of VMX now */
80814cf11afSPaul Mackerras	isync
80914cf11afSPaul Mackerras
81014cf11afSPaul Mackerras/*
81114cf11afSPaul Mackerras * For SMP, we don't do lazy VMX switching because it just gets too
81214cf11afSPaul Mackerras * horrendously complex, especially when a task switches from one CPU
81314cf11afSPaul Mackerras * to another.  Instead we call giveup_altivec in switch_to.
81414cf11afSPaul Mackerras * VRSAVE isn't dealt with here; that is done in the normal context
81514cf11afSPaul Mackerras * switch code. Note that we could rely on vrsave value to eventually
81614cf11afSPaul Mackerras * avoid saving all of the VREGs here...
81714cf11afSPaul Mackerras */
81814cf11afSPaul Mackerras#ifndef CONFIG_SMP
81914cf11afSPaul Mackerras	ld	r3,last_task_used_altivec@got(r2)
82014cf11afSPaul Mackerras	ld	r4,0(r3)
82114cf11afSPaul Mackerras	cmpdi	0,r4,0
82214cf11afSPaul Mackerras	beq	1f
82314cf11afSPaul Mackerras	/* Save VMX state to last_task_used_altivec's THREAD struct */
82414cf11afSPaul Mackerras	addi	r4,r4,THREAD
82514cf11afSPaul Mackerras	SAVE_32VRS(0,r5,r4)
82614cf11afSPaul Mackerras	mfvscr	vr0
82714cf11afSPaul Mackerras	li	r10,THREAD_VSCR
82814cf11afSPaul Mackerras	stvx	vr0,r10,r4
82914cf11afSPaul Mackerras	/* Disable VMX for last_task_used_altivec */
83014cf11afSPaul Mackerras	ld	r5,PT_REGS(r4)
83114cf11afSPaul Mackerras	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
83214cf11afSPaul Mackerras	lis	r6,MSR_VEC@h
83314cf11afSPaul Mackerras	andc	r4,r4,r6
83414cf11afSPaul Mackerras	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
83514cf11afSPaul Mackerras1:
83614cf11afSPaul Mackerras#endif /* CONFIG_SMP */
83714cf11afSPaul Mackerras	/* Hack: if we get an altivec unavailable trap with VRSAVE
83814cf11afSPaul Mackerras	 * set to all zeros, we assume this is a broken application
83914cf11afSPaul Mackerras	 * that fails to set it properly, and thus we switch it to
84014cf11afSPaul Mackerras	 * all 1's
84114cf11afSPaul Mackerras	 */
84214cf11afSPaul Mackerras	mfspr	r4,SPRN_VRSAVE
84314cf11afSPaul Mackerras	cmpdi	0,r4,0
84414cf11afSPaul Mackerras	bne+	1f
84514cf11afSPaul Mackerras	li	r4,-1
84614cf11afSPaul Mackerras	mtspr	SPRN_VRSAVE,r4
84714cf11afSPaul Mackerras1:
84814cf11afSPaul Mackerras	/* enable use of VMX after return */
84914cf11afSPaul Mackerras	ld	r4,PACACURRENT(r13)
85014cf11afSPaul Mackerras	addi	r5,r4,THREAD		/* Get THREAD */
85114cf11afSPaul Mackerras	oris	r12,r12,MSR_VEC@h
85214cf11afSPaul Mackerras	std	r12,_MSR(r1)
85314cf11afSPaul Mackerras	li	r4,1
85414cf11afSPaul Mackerras	li	r10,THREAD_VSCR
85514cf11afSPaul Mackerras	stw	r4,THREAD_USED_VR(r5)
85614cf11afSPaul Mackerras	lvx	vr0,r10,r5
85714cf11afSPaul Mackerras	mtvscr	vr0
85814cf11afSPaul Mackerras	REST_32VRS(0,r4,r5)
85914cf11afSPaul Mackerras#ifndef CONFIG_SMP
86014cf11afSPaul Mackerras	/* Update last_task_used_altivec to 'current' */
86114cf11afSPaul Mackerras	subi	r4,r5,THREAD		/* Back to 'current' */
86214cf11afSPaul Mackerras	std	r4,0(r3)
86314cf11afSPaul Mackerras#endif /* CONFIG_SMP */
86414cf11afSPaul Mackerras	/* restore registers and return */
8656f3d8e69SMichael Neuling	blr
86614cf11afSPaul Mackerras#endif /* CONFIG_ALTIVEC */
86714cf11afSPaul Mackerras
868ce48b210SMichael Neuling#ifdef CONFIG_VSX
869ce48b210SMichael Neuling/*
870ce48b210SMichael Neuling * load_up_vsx(unused, unused, tsk)
871ce48b210SMichael Neuling * Disable VSX for the task which had it previously,
872ce48b210SMichael Neuling * and save its vector registers in its thread_struct.
873ce48b210SMichael Neuling * Reuse the fp and vsx saves, but first check to see if they have
874ce48b210SMichael Neuling * been saved already.
875ce48b210SMichael Neuling * On entry: r13 == 'current' && last_task_used_vsx != 'current'
876ce48b210SMichael Neuling */
877ce48b210SMichael Neuling_STATIC(load_up_vsx)
878ce48b210SMichael Neuling/* Load FP and VSX registers if they haven't been done yet */
879ce48b210SMichael Neuling	andi.	r5,r12,MSR_FP
880ce48b210SMichael Neuling	beql+	load_up_fpu		/* skip if already loaded */
881ce48b210SMichael Neuling	andis.	r5,r12,MSR_VEC@h
882ce48b210SMichael Neuling	beql+	load_up_altivec		/* skip if already loaded */
883ce48b210SMichael Neuling
884ce48b210SMichael Neuling#ifndef CONFIG_SMP
885ce48b210SMichael Neuling	ld	r3,last_task_used_vsx@got(r2)
886ce48b210SMichael Neuling	ld	r4,0(r3)
887ce48b210SMichael Neuling	cmpdi	0,r4,0
888ce48b210SMichael Neuling	beq	1f
889ce48b210SMichael Neuling	/* Disable VSX for last_task_used_vsx */
890ce48b210SMichael Neuling	addi	r4,r4,THREAD
891ce48b210SMichael Neuling	ld	r5,PT_REGS(r4)
892ce48b210SMichael Neuling	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893ce48b210SMichael Neuling	lis	r6,MSR_VSX@h
894ce48b210SMichael Neuling	andc	r6,r4,r6
895ce48b210SMichael Neuling	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
896ce48b210SMichael Neuling1:
897ce48b210SMichael Neuling#endif /* CONFIG_SMP */
898ce48b210SMichael Neuling	ld	r4,PACACURRENT(r13)
899ce48b210SMichael Neuling	addi	r4,r4,THREAD		/* Get THREAD */
900ce48b210SMichael Neuling	li	r6,1
901ce48b210SMichael Neuling	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
902ce48b210SMichael Neuling	/* enable use of VSX after return */
903ce48b210SMichael Neuling	oris	r12,r12,MSR_VSX@h
904ce48b210SMichael Neuling	std	r12,_MSR(r1)
905ce48b210SMichael Neuling#ifndef CONFIG_SMP
906ce48b210SMichael Neuling	/* Update last_task_used_vsx to 'current' */
907ce48b210SMichael Neuling	ld	r4,PACACURRENT(r13)
908ce48b210SMichael Neuling	std	r4,0(r3)
909ce48b210SMichael Neuling#endif /* CONFIG_SMP */
910ce48b210SMichael Neuling	b	fast_exception_return
911ce48b210SMichael Neuling#endif /* CONFIG_VSX */
912ce48b210SMichael Neuling
91314cf11afSPaul Mackerras/*
91414cf11afSPaul Mackerras * Hash table stuff
91514cf11afSPaul Mackerras */
91614cf11afSPaul Mackerras	.align	7
917945feb17SBenjamin Herrenschmidt_STATIC(do_hash_page)
91814cf11afSPaul Mackerras	std	r3,_DAR(r1)
91914cf11afSPaul Mackerras	std	r4,_DSISR(r1)
92014cf11afSPaul Mackerras
92114cf11afSPaul Mackerras	andis.	r0,r4,0xa450		/* weird error? */
9223ccfc65cSPaul Mackerras	bne-	handle_page_fault	/* if not, try to insert a HPTE */
92314cf11afSPaul MackerrasBEGIN_FTR_SECTION
92414cf11afSPaul Mackerras	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
9253ccfc65cSPaul Mackerras	bne-	do_ste_alloc		/* If so handle it */
92614cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
92714cf11afSPaul Mackerras
92814cf11afSPaul Mackerras	/*
929945feb17SBenjamin Herrenschmidt	 * On iSeries, we soft-disable interrupts here, then
930945feb17SBenjamin Herrenschmidt	 * hard-enable interrupts so that the hash_page code can spin on
931945feb17SBenjamin Herrenschmidt	 * the hash_table_lock without problems on a shared processor.
932945feb17SBenjamin Herrenschmidt	 */
933945feb17SBenjamin Herrenschmidt	DISABLE_INTS
934945feb17SBenjamin Herrenschmidt
935945feb17SBenjamin Herrenschmidt	/*
936945feb17SBenjamin Herrenschmidt	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
937945feb17SBenjamin Herrenschmidt	 * and will clobber volatile registers when irq tracing is enabled
938945feb17SBenjamin Herrenschmidt	 * so we need to reload them. It may be possible to be smarter here
939945feb17SBenjamin Herrenschmidt	 * and move the irq tracing elsewhere but let's keep it simple for
940945feb17SBenjamin Herrenschmidt	 * now
941945feb17SBenjamin Herrenschmidt	 */
942945feb17SBenjamin Herrenschmidt#ifdef CONFIG_TRACE_IRQFLAGS
943945feb17SBenjamin Herrenschmidt	ld	r3,_DAR(r1)
944945feb17SBenjamin Herrenschmidt	ld	r4,_DSISR(r1)
945945feb17SBenjamin Herrenschmidt	ld	r5,_TRAP(r1)
946945feb17SBenjamin Herrenschmidt	ld	r12,_MSR(r1)
947945feb17SBenjamin Herrenschmidt	clrrdi	r5,r5,4
948945feb17SBenjamin Herrenschmidt#endif /* CONFIG_TRACE_IRQFLAGS */
949945feb17SBenjamin Herrenschmidt	/*
95014cf11afSPaul Mackerras	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
95114cf11afSPaul Mackerras	 * accessing a userspace segment (even from the kernel). We assume
95214cf11afSPaul Mackerras	 * kernel addresses always have the high bit set.
95314cf11afSPaul Mackerras	 */
95414cf11afSPaul Mackerras	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
95514cf11afSPaul Mackerras	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
95614cf11afSPaul Mackerras	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
95714cf11afSPaul Mackerras	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
95814cf11afSPaul Mackerras	ori	r4,r4,1			/* add _PAGE_PRESENT */
95914cf11afSPaul Mackerras	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
96014cf11afSPaul Mackerras
96114cf11afSPaul Mackerras	/*
96214cf11afSPaul Mackerras	 * r3 contains the faulting address
96314cf11afSPaul Mackerras	 * r4 contains the required access permissions
96414cf11afSPaul Mackerras	 * r5 contains the trap number
96514cf11afSPaul Mackerras	 *
96614cf11afSPaul Mackerras	 * at return r3 = 0 for success
96714cf11afSPaul Mackerras	 */
96814cf11afSPaul Mackerras	bl	.hash_page		/* build HPTE if possible */
96914cf11afSPaul Mackerras	cmpdi	r3,0			/* see if hash_page succeeded */
97014cf11afSPaul Mackerras
9713f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
97214cf11afSPaul Mackerras	/*
97314cf11afSPaul Mackerras	 * If we had interrupts soft-enabled at the point where the
97414cf11afSPaul Mackerras	 * DSI/ISI occurred, and an interrupt came in during hash_page,
97514cf11afSPaul Mackerras	 * handle it now.
97614cf11afSPaul Mackerras	 * We jump to ret_from_except_lite rather than fast_exception_return
97714cf11afSPaul Mackerras	 * because ret_from_except_lite will check for and handle pending
97814cf11afSPaul Mackerras	 * interrupts if necessary.
97914cf11afSPaul Mackerras	 */
9803ccfc65cSPaul Mackerras	beq	13f
981b0a779deSPaul MackerrasEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
982945feb17SBenjamin Herrenschmidt
983b0a779deSPaul MackerrasBEGIN_FW_FTR_SECTION
984b0a779deSPaul Mackerras	/*
985b0a779deSPaul Mackerras	 * Here we have interrupts hard-disabled, so it is sufficient
986b0a779deSPaul Mackerras	 * to restore paca->{soft,hard}_enable and get out.
987b0a779deSPaul Mackerras	 */
988b0a779deSPaul Mackerras	beq	fast_exc_return_irq	/* Return from exception on success */
989b0a779deSPaul MackerrasEND_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
990b0a779deSPaul Mackerras
99114cf11afSPaul Mackerras	/* For a hash failure, we don't bother re-enabling interrupts */
99214cf11afSPaul Mackerras	ble-	12f
99314cf11afSPaul Mackerras
99414cf11afSPaul Mackerras	/*
99514cf11afSPaul Mackerras	 * hash_page couldn't handle it, set soft interrupt enable back
996945feb17SBenjamin Herrenschmidt	 * to what it was before the trap.  Note that .raw_local_irq_restore
99714cf11afSPaul Mackerras	 * handles any interrupts pending at this point.
99814cf11afSPaul Mackerras	 */
99914cf11afSPaul Mackerras	ld	r3,SOFTE(r1)
1000945feb17SBenjamin Herrenschmidt	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
1001945feb17SBenjamin Herrenschmidt	bl	.raw_local_irq_restore
100214cf11afSPaul Mackerras	b	11f
100314cf11afSPaul Mackerras
100414cf11afSPaul Mackerras/* Here we have a page fault that hash_page can't handle. */
10053ccfc65cSPaul Mackerrashandle_page_fault:
100614cf11afSPaul Mackerras	ENABLE_INTS
100714cf11afSPaul Mackerras11:	ld	r4,_DAR(r1)
100814cf11afSPaul Mackerras	ld	r5,_DSISR(r1)
100914cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
101014cf11afSPaul Mackerras	bl	.do_page_fault
101114cf11afSPaul Mackerras	cmpdi	r3,0
10123ccfc65cSPaul Mackerras	beq+	13f
101314cf11afSPaul Mackerras	bl	.save_nvgprs
101414cf11afSPaul Mackerras	mr	r5,r3
101514cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
101614cf11afSPaul Mackerras	lwz	r4,_DAR(r1)
101714cf11afSPaul Mackerras	bl	.bad_page_fault
101814cf11afSPaul Mackerras	b	.ret_from_except
101914cf11afSPaul Mackerras
102079acbb3fSPaul Mackerras13:	b	.ret_from_except_lite
102179acbb3fSPaul Mackerras
102214cf11afSPaul Mackerras/* We have a page fault that hash_page could handle but HV refused
102314cf11afSPaul Mackerras * the PTE insertion
102414cf11afSPaul Mackerras */
102514cf11afSPaul Mackerras12:	bl	.save_nvgprs
1026fa28237cSPaul Mackerras	mr	r5,r3
102714cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
1028a792e75dSBenjamin Herrenschmidt	ld	r4,_DAR(r1)
102914cf11afSPaul Mackerras	bl	.low_hash_fault
103014cf11afSPaul Mackerras	b	.ret_from_except
103114cf11afSPaul Mackerras
103214cf11afSPaul Mackerras	/* here we have a segment miss */
10333ccfc65cSPaul Mackerrasdo_ste_alloc:
103414cf11afSPaul Mackerras	bl	.ste_allocate		/* try to insert stab entry */
103514cf11afSPaul Mackerras	cmpdi	r3,0
10363ccfc65cSPaul Mackerras	bne-	handle_page_fault
10373ccfc65cSPaul Mackerras	b	fast_exception_return
103814cf11afSPaul Mackerras
103914cf11afSPaul Mackerras/*
104014cf11afSPaul Mackerras * r13 points to the PACA, r9 contains the saved CR,
104114cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
104214cf11afSPaul Mackerras * r9 - r13 are saved in paca->exslb.
104314cf11afSPaul Mackerras * We assume we aren't going to take any exceptions during this procedure.
104414cf11afSPaul Mackerras * We assume (DAR >> 60) == 0xc.
104514cf11afSPaul Mackerras */
104614cf11afSPaul Mackerras	.align	7
104714cf11afSPaul Mackerras_GLOBAL(do_stab_bolted)
104814cf11afSPaul Mackerras	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
104914cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
105014cf11afSPaul Mackerras
105114cf11afSPaul Mackerras	/* Hash to the primary group */
105214cf11afSPaul Mackerras	ld	r10,PACASTABVIRT(r13)
1053b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR
105414cf11afSPaul Mackerras	srdi	r11,r11,28
105514cf11afSPaul Mackerras	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
105614cf11afSPaul Mackerras
105714cf11afSPaul Mackerras	/* Calculate VSID */
105814cf11afSPaul Mackerras	/* This is a kernel address, so protovsid = ESID */
10591189be65SPaul Mackerras	ASM_VSID_SCRAMBLE(r11, r9, 256M)
106014cf11afSPaul Mackerras	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
106114cf11afSPaul Mackerras
106214cf11afSPaul Mackerras	/* Search the primary group for a free entry */
106314cf11afSPaul Mackerras1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
106414cf11afSPaul Mackerras	andi.	r11,r11,0x80
106514cf11afSPaul Mackerras	beq	2f
106614cf11afSPaul Mackerras	addi	r10,r10,16
106714cf11afSPaul Mackerras	andi.	r11,r10,0x70
106814cf11afSPaul Mackerras	bne	1b
106914cf11afSPaul Mackerras
107014cf11afSPaul Mackerras	/* Stick to searching only the primary group for now.		*/
107114cf11afSPaul Mackerras	/* At least for now, we use a very simple random castout scheme */
107214cf11afSPaul Mackerras	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
107314cf11afSPaul Mackerras	mftb	r11
107414cf11afSPaul Mackerras	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
107514cf11afSPaul Mackerras	ori	r11,r11,0x10
107614cf11afSPaul Mackerras
107714cf11afSPaul Mackerras	/* r10 currently points to an ste one past the group of interest */
107814cf11afSPaul Mackerras	/* make it point to the randomly selected entry			*/
107914cf11afSPaul Mackerras	subi	r10,r10,128
108014cf11afSPaul Mackerras	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
108114cf11afSPaul Mackerras
108214cf11afSPaul Mackerras	isync			/* mark the entry invalid		*/
108314cf11afSPaul Mackerras	ld	r11,0(r10)
108414cf11afSPaul Mackerras	rldicl	r11,r11,56,1	/* clear the valid bit */
108514cf11afSPaul Mackerras	rotldi	r11,r11,8
108614cf11afSPaul Mackerras	std	r11,0(r10)
108714cf11afSPaul Mackerras	sync
108814cf11afSPaul Mackerras
108914cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
109014cf11afSPaul Mackerras	slbie	r11
109114cf11afSPaul Mackerras
109214cf11afSPaul Mackerras2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
109314cf11afSPaul Mackerras	eieio
109414cf11afSPaul Mackerras
1095b5bbeb23SPaul Mackerras	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
109614cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
109714cf11afSPaul Mackerras	ori	r11,r11,0x90	/* Turn on valid and kp			*/
109814cf11afSPaul Mackerras	std	r11,0(r10)	/* Put new entry back into the stab	*/
109914cf11afSPaul Mackerras
110014cf11afSPaul Mackerras	sync
110114cf11afSPaul Mackerras
110214cf11afSPaul Mackerras	/* All done -- return from exception. */
110314cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
110414cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
110514cf11afSPaul Mackerras
110614cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI
110714cf11afSPaul Mackerras	beq-	unrecov_slb
110814cf11afSPaul Mackerras
110914cf11afSPaul Mackerras	mtcrf	0x80,r9			/* restore CR */
111014cf11afSPaul Mackerras
111114cf11afSPaul Mackerras	mfmsr	r10
111214cf11afSPaul Mackerras	clrrdi	r10,r10,2
111314cf11afSPaul Mackerras	mtmsrd	r10,1
111414cf11afSPaul Mackerras
1115b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r11
1116b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r12
111714cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
111814cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
111914cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
112014cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
112114cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
112214cf11afSPaul Mackerras	rfid
112314cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
112414cf11afSPaul Mackerras
112514cf11afSPaul Mackerras/*
112614cf11afSPaul Mackerras * Space for CPU0's segment table.
112714cf11afSPaul Mackerras *
112814cf11afSPaul Mackerras * On iSeries, the hypervisor must fill in at least one entry before
112916a15a30SStephen Rothwell * we get control (with relocate on).  The address is given to the hv
113016a15a30SStephen Rothwell * as a page number (see xLparMap below), so this must be at a
113114cf11afSPaul Mackerras * fixed address (the linker can't compute (u64)&initial_stab >>
113214cf11afSPaul Mackerras * PAGE_SHIFT).
113314cf11afSPaul Mackerras */
1134758438a7SMichael Ellerman	. = STAB0_OFFSET	/* 0x6000 */
113514cf11afSPaul Mackerras	.globl initial_stab
113614cf11afSPaul Mackerrasinitial_stab:
113714cf11afSPaul Mackerras	.space	4096
113814cf11afSPaul Mackerras
11399e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
114014cf11afSPaul Mackerras/*
114114cf11afSPaul Mackerras * Data area reserved for FWNMI option.
114214cf11afSPaul Mackerras * This address (0x7000) is fixed by the RPA.
114314cf11afSPaul Mackerras */
114414cf11afSPaul Mackerras	. = 0x7000
114514cf11afSPaul Mackerras	.globl fwnmi_data_area
114614cf11afSPaul Mackerrasfwnmi_data_area:
11479e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
114814cf11afSPaul Mackerras
114914cf11afSPaul Mackerras	/* iSeries does not use the FWNMI stuff, so it is safe to put
115014cf11afSPaul Mackerras	 * this here, even if we later allow kernels that will boot on
115114cf11afSPaul Mackerras	 * both pSeries and iSeries */
115214cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
115314cf11afSPaul Mackerras        . = LPARMAP_PHYS
115416a15a30SStephen Rothwell	.globl xLparMap
115516a15a30SStephen RothwellxLparMap:
115616a15a30SStephen Rothwell	.quad	HvEsidsToMap		/* xNumberEsids */
115716a15a30SStephen Rothwell	.quad	HvRangesToMap		/* xNumberRanges */
115816a15a30SStephen Rothwell	.quad	STAB0_PAGE		/* xSegmentTableOffs */
115916a15a30SStephen Rothwell	.zero	40			/* xRsvd */
116016a15a30SStephen Rothwell	/* xEsids (HvEsidsToMap entries of 2 quads) */
116116a15a30SStephen Rothwell	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
116216a15a30SStephen Rothwell	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
116316a15a30SStephen Rothwell	.quad	VMALLOC_START_ESID	/* xKernelEsid */
116416a15a30SStephen Rothwell	.quad	VMALLOC_START_VSID	/* xKernelVsid */
116516a15a30SStephen Rothwell	/* xRanges (HvRangesToMap entries of 3 quads) */
116616a15a30SStephen Rothwell	.quad	HvPagesToMap		/* xPages */
116716a15a30SStephen Rothwell	.quad	0			/* xOffset */
116816a15a30SStephen Rothwell	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
116916a15a30SStephen Rothwell
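	/* The block above is a rough mirror of struct LparMap from
	 * asm/iseries/lpar_map.h; as a sketch (field names per the asm
	 * comments above, types assumed to be u64):
	 *
	 *	struct LparMap {
	 *		u64 xNumberEsids, xNumberRanges, xSegmentTableOffs;
	 *		u64 xRsvd[5];
	 *		struct { u64 xKernelEsid, xKernelVsid; } xEsids[];
	 *		struct { u64 xPages, xOffset, xVPN; } xRanges[];
	 *	};
	 */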
117014cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
117114cf11afSPaul Mackerras
11729e4859efSStephen Rothwell#ifdef CONFIG_PPC_PSERIES
117314cf11afSPaul Mackerras        . = 0x8000
11749e4859efSStephen Rothwell#endif /* CONFIG_PPC_PSERIES */
117514cf11afSPaul Mackerras
117614cf11afSPaul Mackerras/*
1177f39b7a55SOlof Johansson * On pSeries and most other platforms, secondary processors spin
1178f39b7a55SOlof Johansson * in the following code.
117914cf11afSPaul Mackerras * At entry, r3 = this processor's number (physical cpu id)
118014cf11afSPaul Mackerras */
1181f39b7a55SOlof Johansson_GLOBAL(generic_secondary_smp_init)
118214cf11afSPaul Mackerras	mr	r24,r3
118314cf11afSPaul Mackerras
118414cf11afSPaul Mackerras	/* turn on 64-bit mode */
118514cf11afSPaul Mackerras	bl	.enable_64b_mode
118614cf11afSPaul Mackerras
118714cf11afSPaul Mackerras	/* Set up a paca value for this processor. Since we have the
118814cf11afSPaul Mackerras	 * physical cpu id in r24, we need to search the pacas to find
118914cf11afSPaul Mackerras	 * which logical id maps to our physical one.
119014cf11afSPaul Mackerras	 */
1191e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r13, paca)	/* Get base vaddr of paca array	 */
119214cf11afSPaul Mackerras	li	r5,0			/* logical cpu id                */
119314cf11afSPaul Mackerras1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
119414cf11afSPaul Mackerras	cmpw	r6,r24			/* Compare to our id             */
119514cf11afSPaul Mackerras	beq	2f
119614cf11afSPaul Mackerras	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
119714cf11afSPaul Mackerras	addi	r5,r5,1
119814cf11afSPaul Mackerras	cmpwi	r5,NR_CPUS
119914cf11afSPaul Mackerras	blt	1b
120014cf11afSPaul Mackerras
120114cf11afSPaul Mackerras	mr	r3,r24			/* not found, copy phys to r3	 */
120214cf11afSPaul Mackerras	b	.kexec_wait		/* next kernel might do better	 */
120314cf11afSPaul Mackerras
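	/* In rough C terms the search above is (illustrative sketch, using
	 * the paca field that PACAHWCPUID resolves to):
	 *
	 *	for (i = 0; i < NR_CPUS; i++)
	 *		if (paca[i].hw_cpu_id == phys_id)
	 *			goto found;	r13 = &paca[i], r24 = i
	 *	kexec_wait();			no match: park this cpu
	 */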
1204b5bbeb23SPaul Mackerras2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
120514cf11afSPaul Mackerras	/* From now on, r24 is expected to be logical cpuid */
120614cf11afSPaul Mackerras	mr	r24,r5
120714cf11afSPaul Mackerras3:	HMT_LOW
120814cf11afSPaul Mackerras	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
120914cf11afSPaul Mackerras					/* start.			 */
121014cf11afSPaul Mackerras
1211f39b7a55SOlof Johansson#ifndef CONFIG_SMP
1212f39b7a55SOlof Johansson	b	3b			/* Never go on non-SMP		 */
1213f39b7a55SOlof Johansson#else
1214f39b7a55SOlof Johansson	cmpwi	0,r23,0
1215f39b7a55SOlof Johansson	beq	3b			/* Loop until told to go	 */
1216f39b7a55SOlof Johansson
1217b6f6b98aSSonny Rao	sync				/* order paca.run and cur_cpu_spec */
1218b6f6b98aSSonny Rao
1219f39b7a55SOlof Johansson	/* See if we need to call a cpu state restore handler */
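	/* (roughly: if (cur_cpu_spec->cpu_restore) cur_cpu_spec->cpu_restore();
	 *  cpu_restore being the hook that CPU_SPEC_RESTORE refers to)	 */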
1220f39b7a55SOlof Johansson	LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
1221f39b7a55SOlof Johansson	ld	r23,0(r23)
1222f39b7a55SOlof Johansson	ld	r23,CPU_SPEC_RESTORE(r23)
1223f39b7a55SOlof Johansson	cmpdi	0,r23,0
1224f39b7a55SOlof Johansson	beq	4f
1225f39b7a55SOlof Johansson	ld	r23,0(r23)
1226f39b7a55SOlof Johansson	mtctr	r23
1227f39b7a55SOlof Johansson	bctrl
1228f39b7a55SOlof Johansson
1229f39b7a55SOlof Johansson4:	/* Create a temp kernel stack for use before relocation is on.	*/
123014cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
123114cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
123214cf11afSPaul Mackerras
1233c705677eSStephen Rothwell	b	__secondary_start
123414cf11afSPaul Mackerras#endif
123514cf11afSPaul Mackerras
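/*
 * Turn the MMU off: return immediately if MSR_IR/MSR_DR are already clear,
 * otherwise rfid to the real-mode continuation address passed in r4 with
 * relocation disabled.  Clobbers r0 and r3.
 */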
123614cf11afSPaul Mackerras_STATIC(__mmu_off)
123714cf11afSPaul Mackerras	mfmsr	r3
123814cf11afSPaul Mackerras	andi.	r0,r3,MSR_IR|MSR_DR
123914cf11afSPaul Mackerras	beqlr
124014cf11afSPaul Mackerras	andc	r3,r3,r0
124114cf11afSPaul Mackerras	mtspr	SPRN_SRR0,r4
124214cf11afSPaul Mackerras	mtspr	SPRN_SRR1,r3
124314cf11afSPaul Mackerras	sync
124414cf11afSPaul Mackerras	rfid
124514cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
124614cf11afSPaul Mackerras
124714cf11afSPaul Mackerras
124814cf11afSPaul Mackerras/*
124914cf11afSPaul Mackerras * Here is our main kernel entry point. We currently support two kinds of entry
125014cf11afSPaul Mackerras * depending on the value of r5.
125114cf11afSPaul Mackerras *
125214cf11afSPaul Mackerras *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
125314cf11afSPaul Mackerras *                 in r3...r7
125414cf11afSPaul Mackerras *
125514cf11afSPaul Mackerras *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
125614cf11afSPaul Mackerras *                 DT block, r4 is a physical pointer to the kernel itself
125714cf11afSPaul Mackerras *
125814cf11afSPaul Mackerras */
125914cf11afSPaul Mackerras_GLOBAL(__start_initialization_multiplatform)
126014cf11afSPaul Mackerras	/*
126114cf11afSPaul Mackerras	 * Are we booted from a PROM OF-type client interface?
126214cf11afSPaul Mackerras	 */
126314cf11afSPaul Mackerras	cmpldi	cr0,r5,0
1264939e60f6SStephen Rothwell	beq	1f
1265939e60f6SStephen Rothwell	b	.__boot_from_prom		/* yes -> prom */
1266939e60f6SStephen Rothwell1:
126714cf11afSPaul Mackerras	/* Save parameters */
126814cf11afSPaul Mackerras	mr	r31,r3
126914cf11afSPaul Mackerras	mr	r30,r4
127014cf11afSPaul Mackerras
127114cf11afSPaul Mackerras	/* Make sure we are running in 64-bit mode */
127214cf11afSPaul Mackerras	bl	.enable_64b_mode
127314cf11afSPaul Mackerras
127414cf11afSPaul Mackerras	/* Setup some critical 970 SPRs before switching MMU off */
1275f39b7a55SOlof Johansson	mfspr	r0,SPRN_PVR
1276f39b7a55SOlof Johansson	srwi	r0,r0,16
1277f39b7a55SOlof Johansson	cmpwi	r0,0x39		/* 970 */
1278f39b7a55SOlof Johansson	beq	1f
1279f39b7a55SOlof Johansson	cmpwi	r0,0x3c		/* 970FX */
1280f39b7a55SOlof Johansson	beq	1f
1281f39b7a55SOlof Johansson	cmpwi	r0,0x44		/* 970MP */
1282190a24f5SOlof Johansson	beq	1f
1283190a24f5SOlof Johansson	cmpwi	r0,0x45		/* 970GX */
1284f39b7a55SOlof Johansson	bne	2f
1285f39b7a55SOlof Johansson1:	bl	.__cpu_preinit_ppc970
1286f39b7a55SOlof Johansson2:
128714cf11afSPaul Mackerras
128814cf11afSPaul Mackerras	/* Switch off MMU if not already */
1289e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
129014cf11afSPaul Mackerras	add	r4,r4,r30
129114cf11afSPaul Mackerras	bl	.__mmu_off
129214cf11afSPaul Mackerras	b	.__after_prom_start
129314cf11afSPaul Mackerras
1294939e60f6SStephen Rothwell_INIT_STATIC(__boot_from_prom)
129514cf11afSPaul Mackerras	/* Save parameters */
129614cf11afSPaul Mackerras	mr	r31,r3
129714cf11afSPaul Mackerras	mr	r30,r4
129814cf11afSPaul Mackerras	mr	r29,r5
129914cf11afSPaul Mackerras	mr	r28,r6
130014cf11afSPaul Mackerras	mr	r27,r7
130114cf11afSPaul Mackerras
13026088857bSOlaf Hering	/*
13036088857bSOlaf Hering	 * Align the stack to a 16-byte boundary.
13046088857bSOlaf Hering	 * Depending on the size and layout of the ELF sections in the initial
13056088857bSOlaf Hering	 * boot binary, the stack pointer will be unaligned on PowerMac.
13066088857bSOlaf Hering	 */
1307c05b4770SLinus Torvalds	rldicr	r1,r1,0,59
1308c05b4770SLinus Torvalds
130914cf11afSPaul Mackerras	/* Make sure we are running in 64-bit mode */
131014cf11afSPaul Mackerras	bl	.enable_64b_mode
131114cf11afSPaul Mackerras
131214cf11afSPaul Mackerras	/* put a relocation offset into r3 */
131314cf11afSPaul Mackerras	bl	.reloc_offset
131414cf11afSPaul Mackerras
1315e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r2,__toc_start)
131614cf11afSPaul Mackerras	addi	r2,r2,0x4000
131714cf11afSPaul Mackerras	addi	r2,r2,0x4000
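	/* r2 now holds __toc_start + 0x8000: by convention the TOC pointer
	 * points 0x8000 into the TOC so signed 16-bit offsets reach all 64K.
	 * Two addis are needed because addi's immediate tops out at 0x7fff. */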
131814cf11afSPaul Mackerras
131914cf11afSPaul Mackerras	/* Relocate the TOC from a virt addr to a real addr */
13205a408329SPaul Mackerras	add	r2,r2,r3
132114cf11afSPaul Mackerras
132214cf11afSPaul Mackerras	/* Restore parameters */
132314cf11afSPaul Mackerras	mr	r3,r31
132414cf11afSPaul Mackerras	mr	r4,r30
132514cf11afSPaul Mackerras	mr	r5,r29
132614cf11afSPaul Mackerras	mr	r6,r28
132714cf11afSPaul Mackerras	mr	r7,r27
132814cf11afSPaul Mackerras
132914cf11afSPaul Mackerras	/* Do all of the interaction with OF client interface */
133014cf11afSPaul Mackerras	bl	.prom_init
133114cf11afSPaul Mackerras	/* We never return */
133214cf11afSPaul Mackerras	trap
133314cf11afSPaul Mackerras
133414cf11afSPaul Mackerras_STATIC(__after_prom_start)
133514cf11afSPaul Mackerras
133614cf11afSPaul Mackerras/*
1337758438a7SMichael Ellerman * We need to run with __start at physical address PHYSICAL_START.
133814cf11afSPaul Mackerras * This will leave some code in the first 256B of
133914cf11afSPaul Mackerras * real memory, which are reserved for software use.
134014cf11afSPaul Mackerras * The remainder of the first page is loaded with the fixed
134114cf11afSPaul Mackerras * interrupt vectors.  The next two pages are filled with
134214cf11afSPaul Mackerras * unknown exception placeholders.
134314cf11afSPaul Mackerras *
134414cf11afSPaul Mackerras * Note: This process overwrites the OF exception vectors.
134514cf11afSPaul Mackerras *	r26 == relocation offset
134614cf11afSPaul Mackerras *	r27 == KERNELBASE
134714cf11afSPaul Mackerras */
134814cf11afSPaul Mackerras	bl	.reloc_offset
134914cf11afSPaul Mackerras	mr	r26,r3
1350e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r27, KERNELBASE)
135114cf11afSPaul Mackerras
1352e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)	/* target addr */
135314cf11afSPaul Mackerras
135414cf11afSPaul Mackerras	// XXX FIXME: Use phys returned by OF (r30)
13555a408329SPaul Mackerras	add	r4,r27,r26 		/* source addr			 */
135614cf11afSPaul Mackerras					/* current address of _start	 */
135714cf11afSPaul Mackerras					/*   i.e. where we are running	 */
135814cf11afSPaul Mackerras					/*	the source addr		 */
135914cf11afSPaul Mackerras
1360d0b79c54SJimi Xenidis	cmpdi	r4,0			/* In some cases the loader may  */
1361939e60f6SStephen Rothwell	bne	1f
1362939e60f6SStephen Rothwell	b	.start_here_multiplatform /* have already put us at zero */
1363d0b79c54SJimi Xenidis					/* so we can skip the copy.      */
1364939e60f6SStephen Rothwell1:	LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
136514cf11afSPaul Mackerras	sub	r5,r5,r27
136614cf11afSPaul Mackerras
136714cf11afSPaul Mackerras	li	r6,0x100		/* Start offset, the first 0x100 */
136814cf11afSPaul Mackerras					/* bytes were copied earlier.	 */
136914cf11afSPaul Mackerras
137014cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the first n bytes	 */
137114cf11afSPaul Mackerras					/* this includes the code being	 */
137214cf11afSPaul Mackerras					/* executed here.		 */
137314cf11afSPaul Mackerras
1374e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r0, 4f)	/* Jump to the copy of this code */
137514cf11afSPaul Mackerras	mtctr	r0			/* that we just made/relocated	 */
137614cf11afSPaul Mackerras	bctr
137714cf11afSPaul Mackerras
1378e58c3495SDavid Gibson4:	LOAD_REG_IMMEDIATE(r5,klimit)
13795a408329SPaul Mackerras	add	r5,r5,r26
138014cf11afSPaul Mackerras	ld	r5,0(r5)		/* get the value of klimit */
138114cf11afSPaul Mackerras	sub	r5,r5,r27
138214cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the rest */
138314cf11afSPaul Mackerras	b	.start_here_multiplatform
138414cf11afSPaul Mackerras
138514cf11afSPaul Mackerras/*
138614cf11afSPaul Mackerras * Copy routine used to copy the kernel to start at physical address 0
138714cf11afSPaul Mackerras * and flush and invalidate the caches as needed.
138814cf11afSPaul Mackerras * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
138914cf11afSPaul Mackerras * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
139014cf11afSPaul Mackerras *
139114cf11afSPaul Mackerras * Note: this routine *only* clobbers r0, r6 and lr
139214cf11afSPaul Mackerras */
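/*
 * Rough C sketch of the loop below: data is copied in 64-byte chunks
 * (8 doublewords per dcbst/icbi), keeping the D- and I-caches coherent
 * for the destination as we go:
 *
 *	for (off = start; off < limit; off += 64) {
 *		copy 8 doublewords from src + off to dest + off;
 *		dcbst; sync; icbi;
 *	}
 */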
139314cf11afSPaul Mackerras_GLOBAL(copy_and_flush)
139414cf11afSPaul Mackerras	addi	r5,r5,-8
139514cf11afSPaul Mackerras	addi	r6,r6,-8
13965a2fe38dSOlof Johansson4:	li	r0,8			/* Use the smallest common	*/
139714cf11afSPaul Mackerras					/* denominator cache line	*/
139814cf11afSPaul Mackerras					/* size.  This results in	*/
139914cf11afSPaul Mackerras					/* extra cache line flushes	*/
140014cf11afSPaul Mackerras					/* but operation is correct.	*/
140114cf11afSPaul Mackerras					/* Can't get cache line size	*/
140214cf11afSPaul Mackerras					/* from NACA as it is being	*/
140314cf11afSPaul Mackerras					/* moved too.			*/
140414cf11afSPaul Mackerras
140514cf11afSPaul Mackerras	mtctr	r0			/* put # words/line in ctr	*/
140614cf11afSPaul Mackerras3:	addi	r6,r6,8			/* copy a cache line		*/
140714cf11afSPaul Mackerras	ldx	r0,r6,r4
140814cf11afSPaul Mackerras	stdx	r0,r6,r3
140914cf11afSPaul Mackerras	bdnz	3b
141014cf11afSPaul Mackerras	dcbst	r6,r3			/* write it to memory		*/
141114cf11afSPaul Mackerras	sync
141214cf11afSPaul Mackerras	icbi	r6,r3			/* flush the icache line	*/
141314cf11afSPaul Mackerras	cmpld	0,r6,r5
141414cf11afSPaul Mackerras	blt	4b
141514cf11afSPaul Mackerras	sync
141614cf11afSPaul Mackerras	addi	r5,r5,8
141714cf11afSPaul Mackerras	addi	r6,r6,8
141814cf11afSPaul Mackerras	blr
141914cf11afSPaul Mackerras
142014cf11afSPaul Mackerras.align 8
142114cf11afSPaul Mackerrascopy_to_here:
142214cf11afSPaul Mackerras
142314cf11afSPaul Mackerras#ifdef CONFIG_SMP
142414cf11afSPaul Mackerras#ifdef CONFIG_PPC_PMAC
142514cf11afSPaul Mackerras/*
142614cf11afSPaul Mackerras * On PowerMac, secondary processors start from the reset vector, which
142714cf11afSPaul Mackerras * is temporarily turned into a call to one of the functions below.
142814cf11afSPaul Mackerras */
142914cf11afSPaul Mackerras	.section ".text";
143014cf11afSPaul Mackerras	.align 2 ;
143114cf11afSPaul Mackerras
143235499c01SPaul Mackerras	.globl	__secondary_start_pmac_0
143335499c01SPaul Mackerras__secondary_start_pmac_0:
143435499c01SPaul Mackerras	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
143535499c01SPaul Mackerras	li	r24,0
143635499c01SPaul Mackerras	b	1f
143714cf11afSPaul Mackerras	li	r24,1
143835499c01SPaul Mackerras	b	1f
143914cf11afSPaul Mackerras	li	r24,2
144035499c01SPaul Mackerras	b	1f
144114cf11afSPaul Mackerras	li	r24,3
144235499c01SPaul Mackerras1:
144314cf11afSPaul Mackerras
144414cf11afSPaul Mackerras_GLOBAL(pmac_secondary_start)
144514cf11afSPaul Mackerras	/* turn on 64-bit mode */
144614cf11afSPaul Mackerras	bl	.enable_64b_mode
144714cf11afSPaul Mackerras
144814cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
1449f39b7a55SOlof Johansson	bl	.__restore_cpu_ppc970
145014cf11afSPaul Mackerras
145114cf11afSPaul Mackerras	/* pSeries does this early, though I don't think we really need it */
145214cf11afSPaul Mackerras	mfmsr	r3
145314cf11afSPaul Mackerras	ori	r3,r3,MSR_RI
145414cf11afSPaul Mackerras	mtmsrd	r3			/* RI on */
145514cf11afSPaul Mackerras
145614cf11afSPaul Mackerras	/* Set up a paca value for this processor. */
1457e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
145814cf11afSPaul Mackerras	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
145914cf11afSPaul Mackerras	add	r13,r13,r4		/* for this processor.		*/
1460b5bbeb23SPaul Mackerras	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
146114cf11afSPaul Mackerras
146214cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
146314cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
146414cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
146514cf11afSPaul Mackerras
1466c705677eSStephen Rothwell	b	__secondary_start
146714cf11afSPaul Mackerras
146814cf11afSPaul Mackerras#endif /* CONFIG_PPC_PMAC */
146914cf11afSPaul Mackerras
147014cf11afSPaul Mackerras/*
147114cf11afSPaul Mackerras * This function is called after the master CPU has released the
147214cf11afSPaul Mackerras * secondary processors.  The execution environment is relocation off.
147314cf11afSPaul Mackerras * The paca for this processor has the following fields initialized at
147414cf11afSPaul Mackerras * this point:
147514cf11afSPaul Mackerras *   1. Processor number
147614cf11afSPaul Mackerras *   2. Segment table pointer (virtual address)
147714cf11afSPaul Mackerras * On entry the following are set:
147814cf11afSPaul Mackerras *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
147914cf11afSPaul Mackerras *   r24   = cpu# (in Linux terms)
148014cf11afSPaul Mackerras *   r13   = paca virtual address
148114cf11afSPaul Mackerras *   SPRG3 = paca virtual address
148214cf11afSPaul Mackerras */
1483fc68e869SStephen Rothwell	.globl	__secondary_start
1484c705677eSStephen Rothwell__secondary_start:
1485799d6046SPaul Mackerras	/* Set thread priority to MEDIUM */
1486799d6046SPaul Mackerras	HMT_MEDIUM
148714cf11afSPaul Mackerras
1488799d6046SPaul Mackerras	/* Load TOC */
148914cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
149014cf11afSPaul Mackerras
1491799d6046SPaul Mackerras	/* Do early setup for that CPU (stab, slb, hash table pointer) */
1492799d6046SPaul Mackerras	bl	.early_setup_secondary
149314cf11afSPaul Mackerras
149414cf11afSPaul Mackerras	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1495e58c3495SDavid Gibson	LOAD_REG_ADDR(r3, current_set)
149614cf11afSPaul Mackerras	sldi	r28,r24,3		/* get current_set[cpu#]	 */
149714cf11afSPaul Mackerras	ldx	r1,r3,r28
149814cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
149914cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
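	/* i.e. roughly: r1 = (unsigned long)current_set[cpu]
	 *			+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
	 *		 paca->kstack = r1;   (the field PACAKSAVE refers to) */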
150014cf11afSPaul Mackerras
1501799d6046SPaul Mackerras	/* Clear backchain so we get nice backtraces */
150214cf11afSPaul Mackerras	li	r7,0
150314cf11afSPaul Mackerras	mtlr	r7
150414cf11afSPaul Mackerras
150514cf11afSPaul Mackerras	/* enable MMU and jump to start_secondary */
1506e58c3495SDavid Gibson	LOAD_REG_ADDR(r3, .start_secondary_prolog)
1507e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1508d04c56f7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
15093f639ee8SStephen RothwellBEGIN_FW_FTR_SECTION
151014cf11afSPaul Mackerras	ori	r4,r4,MSR_EE
1511ff3da2e0SBenjamin Herrenschmidt	li	r8,1
1512ff3da2e0SBenjamin Herrenschmidt	stb	r8,PACAHARDIRQEN(r13)
15133f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
151414cf11afSPaul Mackerras#endif
1515d04c56f7SPaul MackerrasBEGIN_FW_FTR_SECTION
1516d04c56f7SPaul Mackerras	stb	r7,PACAHARDIRQEN(r13)
1517d04c56f7SPaul MackerrasEND_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1518ff3da2e0SBenjamin Herrenschmidt	stb	r7,PACASOFTIRQEN(r13)
1519d04c56f7SPaul Mackerras
1520b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r3
1521b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r4
152214cf11afSPaul Mackerras	rfid
152314cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
152414cf11afSPaul Mackerras
152514cf11afSPaul Mackerras/*
152614cf11afSPaul Mackerras * Running with relocation on at this point.  All we want to do is
152714cf11afSPaul Mackerras * zero the stack back-chain pointer before going into C code.
152814cf11afSPaul Mackerras */
152914cf11afSPaul Mackerras_GLOBAL(start_secondary_prolog)
153014cf11afSPaul Mackerras	li	r3,0
153114cf11afSPaul Mackerras	std	r3,0(r1)		/* Zero the stack frame pointer	*/
153214cf11afSPaul Mackerras	bl	.start_secondary
1533799d6046SPaul Mackerras	b	.
153414cf11afSPaul Mackerras#endif
153514cf11afSPaul Mackerras
153614cf11afSPaul Mackerras/*
153714cf11afSPaul Mackerras * This subroutine clobbers r11 and r12
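 * It sets MSR[SF] (64-bit mode) and MSR[ISF] (64-bit interrupt mode).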
153814cf11afSPaul Mackerras */
153914cf11afSPaul Mackerras_GLOBAL(enable_64b_mode)
154014cf11afSPaul Mackerras	mfmsr	r11			/* grab the current MSR */
154114cf11afSPaul Mackerras	li	r12,1
154214cf11afSPaul Mackerras	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
154314cf11afSPaul Mackerras	or	r11,r11,r12
154414cf11afSPaul Mackerras	li	r12,1
154514cf11afSPaul Mackerras	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
154614cf11afSPaul Mackerras	or	r11,r11,r12
154714cf11afSPaul Mackerras	mtmsrd	r11
154814cf11afSPaul Mackerras	isync
154914cf11afSPaul Mackerras	blr
155014cf11afSPaul Mackerras
155114cf11afSPaul Mackerras/*
155214cf11afSPaul Mackerras * This is where the main kernel code starts.
155314cf11afSPaul Mackerras */
1554939e60f6SStephen Rothwell_INIT_STATIC(start_here_multiplatform)
155514cf11afSPaul Mackerras	/* get a new offset, now that the kernel has moved. */
155614cf11afSPaul Mackerras	bl	.reloc_offset
155714cf11afSPaul Mackerras	mr	r26,r3
155814cf11afSPaul Mackerras
155914cf11afSPaul Mackerras	/* Clear out the BSS. It may have been done in prom_init
156014cf11afSPaul Mackerras	 * already, but that's irrelevant since prom_init will soon
156114cf11afSPaul Mackerras	 * be detached from the kernel completely. Besides, we need
156214cf11afSPaul Mackerras	 * to clear it now for kexec-style entry.
156314cf11afSPaul Mackerras	 */
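	/* Roughly: memset(__bss_start, 0, __bss_stop - __bss_start), done a
	 * doubleword at a time with the size rounded up to 8 bytes. */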
1564e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r11,__bss_stop)
1565e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r8,__bss_start)
156614cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
156714cf11afSPaul Mackerras	addi	r11,r11,7		/* round up to an even double word */
156814cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
156914cf11afSPaul Mackerras	beq	4f
157014cf11afSPaul Mackerras	addi	r8,r8,-8
157114cf11afSPaul Mackerras	li	r0,0
157214cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
157314cf11afSPaul Mackerras3:	stdu	r0,8(r8)
157414cf11afSPaul Mackerras	bdnz	3b
157514cf11afSPaul Mackerras4:
157614cf11afSPaul Mackerras
157714cf11afSPaul Mackerras	mfmsr	r6
157814cf11afSPaul Mackerras	ori	r6,r6,MSR_RI
157914cf11afSPaul Mackerras	mtmsrd	r6			/* RI on */
158014cf11afSPaul Mackerras
158114cf11afSPaul Mackerras	/* The following gets the stack and TOC set up with the regs */
158214cf11afSPaul Mackerras	/* pointing to the real addr of the kernel stack.  This is   */
158314cf11afSPaul Mackerras	/* all done to support the C function call below which sets  */
158414cf11afSPaul Mackerras	/* up the htab.  This is done because we have relocated the  */
158514cf11afSPaul Mackerras	/* kernel but are still running in real mode. */
158614cf11afSPaul Mackerras
1587e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3,init_thread_union)
15885a408329SPaul Mackerras	add	r3,r3,r26
158914cf11afSPaul Mackerras
159014cf11afSPaul Mackerras	/* set up a stack pointer (physical address) */
159114cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
159214cf11afSPaul Mackerras	li	r0,0
159314cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
159414cf11afSPaul Mackerras
159514cf11afSPaul Mackerras	/* set up the TOC (physical address) */
1596e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r2,__toc_start)
159714cf11afSPaul Mackerras	addi	r2,r2,0x4000
159814cf11afSPaul Mackerras	addi	r2,r2,0x4000
15995a408329SPaul Mackerras	add	r2,r2,r26
160014cf11afSPaul Mackerras
160114cf11afSPaul Mackerras	/* Do very early kernel initializations, including initial hash table,
160214cf11afSPaul Mackerras	 * stab and slb setup before we turn on relocation.	*/
160314cf11afSPaul Mackerras
160414cf11afSPaul Mackerras	/* Restore parameters passed from prom_init/kexec */
160514cf11afSPaul Mackerras	mr	r3,r31
160614cf11afSPaul Mackerras 	bl	.early_setup
160714cf11afSPaul Mackerras
1608e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3, .start_here_common)
1609e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1610b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR0,r3
1611b5bbeb23SPaul Mackerras	mtspr	SPRN_SRR1,r4
161214cf11afSPaul Mackerras	rfid
161314cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
161414cf11afSPaul Mackerras
161514cf11afSPaul Mackerras	/* This is where all platforms converge execution */
1616fc68e869SStephen Rothwell_INIT_GLOBAL(start_here_common)
161714cf11afSPaul Mackerras	/* relocation is on at this point */
161814cf11afSPaul Mackerras
161914cf11afSPaul Mackerras	/* The following code sets up the SP and TOC now that we are */
162014cf11afSPaul Mackerras	/* running with translation enabled. */
162114cf11afSPaul Mackerras
1622e58c3495SDavid Gibson	LOAD_REG_IMMEDIATE(r3,init_thread_union)
162314cf11afSPaul Mackerras
162414cf11afSPaul Mackerras	/* set up the stack */
162514cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
162614cf11afSPaul Mackerras	li	r0,0
162714cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
162814cf11afSPaul Mackerras
162914cf11afSPaul Mackerras	/* Load the TOC */
163014cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
163114cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
163214cf11afSPaul Mackerras
163314cf11afSPaul Mackerras	bl	.setup_system
163414cf11afSPaul Mackerras
163514cf11afSPaul Mackerras	/* Load up the kernel context */
163614cf11afSPaul Mackerras5:
163714cf11afSPaul Mackerras	li	r5,0
1638d04c56f7SPaul Mackerras	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
1639d04c56f7SPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1640d04c56f7SPaul MackerrasBEGIN_FW_FTR_SECTION
164114cf11afSPaul Mackerras	mfmsr	r5
1642ff3da2e0SBenjamin Herrenschmidt	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries */
164314cf11afSPaul Mackerras	mtmsrd	r5
1644ff3da2e0SBenjamin Herrenschmidt	li	r5,1
16453f639ee8SStephen RothwellEND_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
164614cf11afSPaul Mackerras#endif
1647ff3da2e0SBenjamin Herrenschmidt	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */
164814cf11afSPaul Mackerras
164914cf11afSPaul Mackerras	bl	.start_kernel
165014cf11afSPaul Mackerras
1651f1870f77SAnton Blanchard	/* Not reached */
1652f1870f77SAnton Blanchard	BUG_OPCODE
165314cf11afSPaul Mackerras
165414cf11afSPaul Mackerras/*
165514cf11afSPaul Mackerras * We put a few things here that have to be page-aligned.
165614cf11afSPaul Mackerras * This stuff goes at the beginning of the bss, which is page-aligned.
165714cf11afSPaul Mackerras */
165814cf11afSPaul Mackerras	.section ".bss"
165914cf11afSPaul Mackerras
166014cf11afSPaul Mackerras	.align	PAGE_SHIFT
166114cf11afSPaul Mackerras
166214cf11afSPaul Mackerras	.globl	empty_zero_page
166314cf11afSPaul Mackerrasempty_zero_page:
166414cf11afSPaul Mackerras	.space	PAGE_SIZE
166514cf11afSPaul Mackerras
166614cf11afSPaul Mackerras	.globl	swapper_pg_dir
166714cf11afSPaul Mackerrasswapper_pg_dir:
1668ee7a76daSStephen Rothwell	.space	PGD_TABLE_SIZE
1669