/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
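
/*
 * DO_SOFT_DISABLE selects the iSeries "soft disable" scheme used by the
 * DISABLE_INTS/ENABLE_INTS macros and the *_iSeries_masked stubs below:
 * the enabled/disabled state is tracked in the paca (PACAPROCENABLED)
 * rather than by clearing MSR_EE.
 */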

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 *   SPRG Usage
 *
 *   Register	Definition
 *
 *   SPRG0	reserved for hypervisor
 *   SPRG1	temp - used to save gpr
 *   SPRG2	temp - used to save gpr
 *   SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl  _stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong mschunks_map-KERNELBASE
	.llong 0	/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
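	/*
	 * The value 0x7265677368657265 is just the ASCII string "regshere";
	 * it is stored near the top of each exception frame (see
	 * EXCEPTION_PROLOG_COMMON below) so such frames can be recognised
	 * when walking the stack.
	 */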
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;		/* and SRR1 */			\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
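
/*
 * Note on EXCEPTION_PROLOG_PSERIES: the common handlers are linked at
 * kernel virtual addresses but this prolog runs with relocation off.
 * The high 32 bits of the handler address are recovered from the paca
 * pointer (itself a kernel virtual address) and the low 16 bits from
 * label@l, which is enough because the common prologs live below
 * 0x6000 (see the memory layout comment at the top of this file).
 * The rfid then both turns relocation (IR/DR) back on and branches
 * to the handler.
 */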

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				   \
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							   \
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
	bge-	cr1,bad_stack;		/* abort if it is		*/ \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					   \
	std	r9,GPR9(r1);						   \
	std	r10,GPR10(r1);						   \
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					   \
	ld	r11,area+EX_R13(r13);					   \
	std	r9,GPR11(r1);						   \
	std	r10,GPR12(r1);						   \
	std	r11,GPR13(r1);						   \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	mflr	r9;			/* save LR in stackframe	*/ \
	std	r9,_LINK(r1);						   \
	mfctr	r10;			/* save CTR in stackframe	*/ \
	std	r10,_CTR(r1);						   \
	mfspr	r11,XER;		/* save XER in stackframe	*/ \
	std	r11,_XER(r1);						   \
	li	r9,(n)+1;						   \
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							   \
	ld	r11,exception_marker@toc(r2);				   \
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
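
/*
 * Two details of EXCEPTION_PROLOG_COMMON worth noting: the trap number
 * is stored as (n)+1, i.e. with the low bit set, to record that the
 * non-volatile GPRs have not been saved yet (.save_nvgprs clears that
 * bit once it has saved them), and the exception_marker ("regshere")
 * word stored at STACK_FRAME_OVERHEAD-16 is what identifies this as
 * an exception frame to anything walking the stack.
 */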

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
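
/*
 * In STD_EXCEPTION_PSERIES, ". = n" pins the vector at its architected
 * offset, SPRG1 is used as scratch so that r13 can be loaded with the
 * paca pointer, HMT_MEDIUM bumps the SMT thread priority back to medium
 * (the thread may have been idling at low priority, cf. HMT_LOW below),
 * and RUNLATCH_ON sets the run latch in the CTRL SPR to show the thread
 * is doing real work again.
 */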

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
	.globl label##_iSeries;						\
label##_iSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRG1,r13;		/* save r13 */			\
	RUNLATCH_ON(r13);						\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
	lbz	r10,PACAPROCENABLED(r13);				\
	cmpwi	0,r10,0;						\
	beq-	label##_iSeries_masked;					\
	EXCEPTION_PROLOG_ISERIES_2;					\
	b	label##_common;						\

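/*
 * The maskable iSeries vectors implement the soft-disable scheme: if
 * PACAPROCENABLED is zero we divert to label##_iSeries_masked (see
 * decrementer_iSeries_masked / hardware_interrupt_iSeries_masked
 * below), which returns to the interrupted code without running the
 * handler (the decrementer case first notes the pending tick in the
 * lppaca); pending work is then picked up when interrupts are
 * soft-enabled again via .local_irq_restore.
 */
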
#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
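	/*
	 * On CPUs without an SLB (this section is patched out when
	 * CPU_FTR_SLB is set) check for a segment table miss on a
	 * kernel-region address: DAR>>60 is the top nibble of the
	 * faulting address, and the rlwimi merges in the DSISR
	 * "segment table fault" bit (0x00200000) as 0x20, so r13 == 0x2c
	 * means an STE is missing for a 0xC... address and we take the
	 * bolted-STAB fast path.
	 */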
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,SRR0			/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)
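	/*
	 * The "." vector argument places this handler at the current
	 * location (above 0x3000) rather than at its architected 0xf00
	 * offset, which is taken up by the branch planted above so that
	 * the Altivec unavailable vector at 0xf20 stays usable.
	 */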

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * Vectors for the FWNMI option.  Share common code.
 */
      .globl system_reset_fwnmi
system_reset_fwnmi:
      HMT_MEDIUM
      mtspr   SPRG1,r13               /* save r13 */
      RUNLATCH_ON(r13)
      EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

      .globl machine_check_fwnmi
machine_check_fwnmi:
      HMT_MEDIUM
      mtspr   SPRG1,r13               /* save r13 */
      RUNLATCH_ON(r13)
      EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#ifdef CONFIG_PPC_ISERIES
/***  ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl	data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl	instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* Told to go: start the processor */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
		which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
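	/* presumably the hypervisor call is not guaranteed to preserve
	 * r13, so the paca pointer is reloaded from SPRG3 before looping */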
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1
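	/*
	 * RI is cleared because once SRR0/SRR1 are loaded below, any
	 * exception taken here would overwrite them and the state could
	 * not be recovered; RI=0 tells such a handler that this is
	 * unrecoverable (cf. the check at the top of this routine).
	 */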

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page	 	/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 *
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
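	/*
	 * r4 now holds the access mask for this fault: _PAGE_PRESENT,
	 * plus _PAGE_RW for a store, _PAGE_USER if the access came from
	 * user mode or touched a user address, and _PAGE_EXEC for an
	 * 0x400 (instruction) fault.  hash_page checks these against the
	 * Linux pte before building an HPTE.
	 */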
103114cf11afSPaul Mackerras
103214cf11afSPaul Mackerras	/*
103314cf11afSPaul Mackerras	 * On iSeries, we soft-disable interrupts here, then
103414cf11afSPaul Mackerras	 * hard-enable interrupts so that the hash_page code can spin on
103514cf11afSPaul Mackerras	 * the hash_table_lock without problems on a shared processor.
103614cf11afSPaul Mackerras	 */
103714cf11afSPaul Mackerras	DISABLE_INTS
103814cf11afSPaul Mackerras
103914cf11afSPaul Mackerras	/*
104014cf11afSPaul Mackerras	 * r3 contains the faulting address
104114cf11afSPaul Mackerras	 * r4 contains the required access permissions
104214cf11afSPaul Mackerras	 * r5 contains the trap number
104314cf11afSPaul Mackerras	 *
104414cf11afSPaul Mackerras	 * at return r3 = 0 for success
104514cf11afSPaul Mackerras	 */
104614cf11afSPaul Mackerras	bl	.hash_page		/* build HPTE if possible */
104714cf11afSPaul Mackerras	cmpdi	r3,0			/* see if hash_page succeeded */
104814cf11afSPaul Mackerras
104914cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
105014cf11afSPaul Mackerras	/*
105114cf11afSPaul Mackerras	 * If we had interrupts soft-enabled at the point where the
105214cf11afSPaul Mackerras	 * DSI/ISI occurred, and an interrupt came in during hash_page,
105314cf11afSPaul Mackerras	 * handle it now.
105414cf11afSPaul Mackerras	 * We jump to ret_from_except_lite rather than fast_exception_return
105514cf11afSPaul Mackerras	 * because ret_from_except_lite will check for and handle pending
105614cf11afSPaul Mackerras	 * interrupts if necessary.
105714cf11afSPaul Mackerras	 */
105814cf11afSPaul Mackerras	beq	.ret_from_except_lite
105914cf11afSPaul Mackerras	/* For a hash failure, we don't bother re-enabling interrupts */
106014cf11afSPaul Mackerras	ble-	12f
106114cf11afSPaul Mackerras
106214cf11afSPaul Mackerras	/*
106314cf11afSPaul Mackerras	 * hash_page couldn't handle it, set soft interrupt enable back
106414cf11afSPaul Mackerras	 * to what it was before the trap.  Note that .local_irq_restore
106514cf11afSPaul Mackerras	 * handles any interrupts pending at this point.
106614cf11afSPaul Mackerras	 */
106714cf11afSPaul Mackerras	ld	r3,SOFTE(r1)
106814cf11afSPaul Mackerras	bl	.local_irq_restore
106914cf11afSPaul Mackerras	b	11f
107014cf11afSPaul Mackerras#else
107114cf11afSPaul Mackerras	beq	fast_exception_return   /* Return from exception on success */
107214cf11afSPaul Mackerras	ble-	12f			/* Failure return from hash_page */
107314cf11afSPaul Mackerras
107414cf11afSPaul Mackerras	/* fall through */
107514cf11afSPaul Mackerras#endif /* DO_SOFT_DISABLE */
107614cf11afSPaul Mackerras
107714cf11afSPaul Mackerras/* Here we have a page fault that hash_page can't handle. */
107814cf11afSPaul Mackerras_GLOBAL(handle_page_fault)
107914cf11afSPaul Mackerras	ENABLE_INTS
108014cf11afSPaul Mackerras11:	ld	r4,_DAR(r1)
108114cf11afSPaul Mackerras	ld	r5,_DSISR(r1)
108214cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
108314cf11afSPaul Mackerras	bl	.do_page_fault
108414cf11afSPaul Mackerras	cmpdi	r3,0
108514cf11afSPaul Mackerras	beq+	.ret_from_except_lite
108614cf11afSPaul Mackerras	bl	.save_nvgprs
108714cf11afSPaul Mackerras	mr	r5,r3
108814cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
108914cf11afSPaul Mackerras	lwz	r4,_DAR(r1)
109014cf11afSPaul Mackerras	bl	.bad_page_fault
109114cf11afSPaul Mackerras	b	.ret_from_except
109214cf11afSPaul Mackerras
109314cf11afSPaul Mackerras/* We have a page fault that hash_page could handle but HV refused
109414cf11afSPaul Mackerras * the PTE insertion
109514cf11afSPaul Mackerras */
109614cf11afSPaul Mackerras12:	bl	.save_nvgprs
109714cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
109814cf11afSPaul Mackerras	lwz	r4,_DAR(r1)
109914cf11afSPaul Mackerras	bl	.low_hash_fault
110014cf11afSPaul Mackerras	b	.ret_from_except
110114cf11afSPaul Mackerras
110214cf11afSPaul Mackerras	/* here we have a segment miss */
110314cf11afSPaul Mackerras_GLOBAL(do_ste_alloc)
110414cf11afSPaul Mackerras	bl	.ste_allocate		/* try to insert stab entry */
110514cf11afSPaul Mackerras	cmpdi	r3,0
110614cf11afSPaul Mackerras	beq+	fast_exception_return
110714cf11afSPaul Mackerras	b	.handle_page_fault
110814cf11afSPaul Mackerras
110914cf11afSPaul Mackerras/*
111014cf11afSPaul Mackerras * r13 points to the PACA, r9 contains the saved CR,
111114cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
111214cf11afSPaul Mackerras * r9 - r13 are saved in paca->exslb.
111314cf11afSPaul Mackerras * We assume we aren't going to take any exceptions during this procedure.
111414cf11afSPaul Mackerras * We assume (DAR >> 60) == 0xc.
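 * We create a segment table entry for the kernel address in DAR, casting
 * out a random existing entry if the primary group is full.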
111514cf11afSPaul Mackerras */
111614cf11afSPaul Mackerras	.align	7
111714cf11afSPaul Mackerras_GLOBAL(do_stab_bolted)
111814cf11afSPaul Mackerras	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
111914cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
112014cf11afSPaul Mackerras
112114cf11afSPaul Mackerras	/* Hash to the primary group */
112214cf11afSPaul Mackerras	ld	r10,PACASTABVIRT(r13)
112314cf11afSPaul Mackerras	mfspr	r11,DAR
112414cf11afSPaul Mackerras	srdi	r11,r11,28
112514cf11afSPaul Mackerras	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
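	/* group offset = (ESID & 0x1f) * 128: 32 groups of 8 x 16-byte STEs */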
112614cf11afSPaul Mackerras
112714cf11afSPaul Mackerras	/* Calculate VSID */
112814cf11afSPaul Mackerras	/* This is a kernel address, so protovsid = ESID */
112914cf11afSPaul Mackerras	ASM_VSID_SCRAMBLE(r11, r9)
113014cf11afSPaul Mackerras	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
113114cf11afSPaul Mackerras
113214cf11afSPaul Mackerras	/* Search the primary group for a free entry */
113314cf11afSPaul Mackerras1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
113414cf11afSPaul Mackerras	andi.	r11,r11,0x80
113514cf11afSPaul Mackerras	beq	2f
113614cf11afSPaul Mackerras	addi	r10,r10,16
113714cf11afSPaul Mackerras	andi.	r11,r10,0x70
113814cf11afSPaul Mackerras	bne	1b
113914cf11afSPaul Mackerras
114014cf11afSPaul Mackerras	/* Stick to searching only the primary group for now.		*/
114114cf11afSPaul Mackerras	/* At least for now, we use a very simple random castout scheme */
114214cf11afSPaul Mackerras	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
114314cf11afSPaul Mackerras	mftb	r11
114414cf11afSPaul Mackerras	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
114514cf11afSPaul Mackerras	ori	r11,r11,0x10
114614cf11afSPaul Mackerras
114714cf11afSPaul Mackerras	/* r10 currently points to an ste one past the group of interest */
114814cf11afSPaul Mackerras	/* make it point to the randomly selected entry			*/
114914cf11afSPaul Mackerras	subi	r10,r10,128
115014cf11afSPaul Mackerras	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
115114cf11afSPaul Mackerras
115214cf11afSPaul Mackerras	isync			/* mark the entry invalid		*/
115314cf11afSPaul Mackerras	ld	r11,0(r10)
115414cf11afSPaul Mackerras	rldicl	r11,r11,56,1	/* clear the valid bit */
115514cf11afSPaul Mackerras	rotldi	r11,r11,8	/* rotate back into place */
115614cf11afSPaul Mackerras	std	r11,0(r10)
115714cf11afSPaul Mackerras	sync
115814cf11afSPaul Mackerras
115914cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
116014cf11afSPaul Mackerras	slbie	r11
116114cf11afSPaul Mackerras
116214cf11afSPaul Mackerras2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
116314cf11afSPaul Mackerras	eieio
116414cf11afSPaul Mackerras
116514cf11afSPaul Mackerras	mfspr	r11,DAR		/* Get the new esid			*/
116614cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
116714cf11afSPaul Mackerras	ori	r11,r11,0x90	/* Turn on valid and kp			*/
116814cf11afSPaul Mackerras	std	r11,0(r10)	/* Put new entry back into the stab	*/
116914cf11afSPaul Mackerras
117014cf11afSPaul Mackerras	sync
117114cf11afSPaul Mackerras
117214cf11afSPaul Mackerras	/* All done -- return from exception. */
117314cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
117414cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
117514cf11afSPaul Mackerras
117614cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI
117714cf11afSPaul Mackerras	beq-	unrecov_slb
117814cf11afSPaul Mackerras
117914cf11afSPaul Mackerras	mtcrf	0x80,r9			/* restore CR */
118014cf11afSPaul Mackerras
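	/* Clear MSR_RI: we are about to load SRR0/SRR1 for the rfid below,
	 * so an exception taken from here on could not be recovered anyway.
	 */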
118114cf11afSPaul Mackerras	mfmsr	r10
118214cf11afSPaul Mackerras	clrrdi	r10,r10,2
118314cf11afSPaul Mackerras	mtmsrd	r10,1
118414cf11afSPaul Mackerras
118514cf11afSPaul Mackerras	mtspr	SRR0,r11
118614cf11afSPaul Mackerras	mtspr	SRR1,r12
118714cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
118814cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
118914cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
119014cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
119114cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
119214cf11afSPaul Mackerras	rfid
119314cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
119414cf11afSPaul Mackerras
119514cf11afSPaul Mackerras/*
119614cf11afSPaul Mackerras * r13 points to the PACA, r9 contains the saved CR,
119714cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
119814cf11afSPaul Mackerras * r3 has the faulting address
119914cf11afSPaul Mackerras * r9 - r13 are saved in paca->exslb.
120014cf11afSPaul Mackerras * r3 is saved in paca->slb_r3
120114cf11afSPaul Mackerras * We assume we aren't going to take any exceptions during this procedure.
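 * We create an SLB entry for the faulting address via slb_allocate.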
120214cf11afSPaul Mackerras */
120314cf11afSPaul Mackerras_GLOBAL(do_slb_miss)
120414cf11afSPaul Mackerras	mflr	r10
120514cf11afSPaul Mackerras
120614cf11afSPaul Mackerras	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
120714cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
120814cf11afSPaul Mackerras
120914cf11afSPaul Mackerras	bl	.slb_allocate			/* handle it */
121014cf11afSPaul Mackerras
121114cf11afSPaul Mackerras	/* All done -- return from exception. */
121214cf11afSPaul Mackerras
121314cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_LR(r13)
121414cf11afSPaul Mackerras	ld	r3,PACA_EXSLB+EX_R3(r13)
121514cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
121614cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
121714cf11afSPaul Mackerras	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
121814cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
121914cf11afSPaul Mackerras
122014cf11afSPaul Mackerras	mtlr	r10
122114cf11afSPaul Mackerras
122214cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
122314cf11afSPaul Mackerras	beq-	unrecov_slb
122414cf11afSPaul Mackerras
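	/* Restore cr0 and cr7 with two single-field mtcrf's.  The .machine
	 * power4 is presumably here so the assembler accepts the single-field
	 * (mtocrf) form, which is cheaper than a full mtcrf on POWER4-class
	 * CPUs.
	 */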
122514cf11afSPaul Mackerras.machine	push
122614cf11afSPaul Mackerras.machine	"power4"
122714cf11afSPaul Mackerras	mtcrf	0x80,r9
122814cf11afSPaul Mackerras	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
122914cf11afSPaul Mackerras.machine	pop
123014cf11afSPaul Mackerras
123114cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
123214cf11afSPaul Mackerras	mtspr	SRR0,r11
123314cf11afSPaul Mackerras	mtspr	SRR1,r12
123414cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
123514cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
123614cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
123714cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
123814cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
123914cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
124014cf11afSPaul Mackerras	rfid
124114cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
124214cf11afSPaul Mackerras
124314cf11afSPaul Mackerrasunrecov_slb:
124414cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
124514cf11afSPaul Mackerras	DISABLE_INTS
124614cf11afSPaul Mackerras	bl	.save_nvgprs
124714cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
124814cf11afSPaul Mackerras	bl	.unrecoverable_exception
124914cf11afSPaul Mackerras	b	1b
125014cf11afSPaul Mackerras
125114cf11afSPaul Mackerras/*
125214cf11afSPaul Mackerras * Space for CPU0's segment table.
125314cf11afSPaul Mackerras *
125414cf11afSPaul Mackerras * On iSeries, the hypervisor must fill in at least one entry before
125514cf11afSPaul Mackerras * we get control (with relocate on).  The address is given to the hv
125614cf11afSPaul Mackerras * as a page number (see xLparMap in LparData.c), so this must be at a
125714cf11afSPaul Mackerras * fixed address (the linker can't compute (u64)&initial_stab >>
125814cf11afSPaul Mackerras * PAGE_SHIFT).
125914cf11afSPaul Mackerras */
126014cf11afSPaul Mackerras	. = STAB0_PHYS_ADDR	/* 0x6000 */
126114cf11afSPaul Mackerras	.globl initial_stab
126214cf11afSPaul Mackerrasinitial_stab:
126314cf11afSPaul Mackerras	.space	4096
126414cf11afSPaul Mackerras
126514cf11afSPaul Mackerras/*
126614cf11afSPaul Mackerras * Data area reserved for FWNMI option.
126714cf11afSPaul Mackerras * This address (0x7000) is fixed by the RPA.
126814cf11afSPaul Mackerras */
126914cf11afSPaul Mackerras	. = 0x7000
127014cf11afSPaul Mackerras	.globl fwnmi_data_area
127114cf11afSPaul Mackerrasfwnmi_data_area:
127214cf11afSPaul Mackerras
127314cf11afSPaul Mackerras	/* iSeries does not use the FWNMI stuff, so it is safe to put
127414cf11afSPaul Mackerras	 * this here, even if we later allow kernels that will boot on
127514cf11afSPaul Mackerras	 * both pSeries and iSeries */
127614cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
127714cf11afSPaul Mackerras        . = LPARMAP_PHYS
127814cf11afSPaul Mackerras#include "lparmap.s"
127914cf11afSPaul Mackerras/*
128014cf11afSPaul Mackerras * This ".text" is here for old compilers that generate a trailing
128114cf11afSPaul Mackerras * .note section when compiling .c files to .s
128214cf11afSPaul Mackerras */
128314cf11afSPaul Mackerras	.text
128414cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
128514cf11afSPaul Mackerras
128614cf11afSPaul Mackerras        . = 0x8000
128714cf11afSPaul Mackerras
128814cf11afSPaul Mackerras/*
128914cf11afSPaul Mackerras * On pSeries, secondary processors spin in the following code.
129014cf11afSPaul Mackerras * At entry, r3 = this processor's number (physical cpu id)
129114cf11afSPaul Mackerras */
129214cf11afSPaul Mackerras_GLOBAL(pSeries_secondary_smp_init)
129314cf11afSPaul Mackerras	mr	r24,r3
129414cf11afSPaul Mackerras
129514cf11afSPaul Mackerras	/* turn on 64-bit mode */
129614cf11afSPaul Mackerras	bl	.enable_64b_mode
129714cf11afSPaul Mackerras	isync
129814cf11afSPaul Mackerras
129914cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
130014cf11afSPaul Mackerras	bl	.__restore_cpu_setup
130114cf11afSPaul Mackerras
130214cf11afSPaul Mackerras	/* Set up a paca value for this processor. Since we have the
130314cf11afSPaul Mackerras	 * physical cpu id in r24, we need to search the pacas to find
130414cf11afSPaul Mackerras	 * which logical id maps to our physical one.
130514cf11afSPaul Mackerras	 */
130614cf11afSPaul Mackerras	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
130714cf11afSPaul Mackerras	li	r5,0			/* logical cpu id                */
130814cf11afSPaul Mackerras1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
130914cf11afSPaul Mackerras	cmpw	r6,r24			/* Compare to our id             */
131014cf11afSPaul Mackerras	beq	2f
131114cf11afSPaul Mackerras	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
131214cf11afSPaul Mackerras	addi	r5,r5,1
131314cf11afSPaul Mackerras	cmpwi	r5,NR_CPUS
131414cf11afSPaul Mackerras	blt	1b
131514cf11afSPaul Mackerras
131614cf11afSPaul Mackerras	mr	r3,r24			/* not found, copy phys to r3	 */
131714cf11afSPaul Mackerras	b	.kexec_wait		/* next kernel might do better	 */
131814cf11afSPaul Mackerras
131914cf11afSPaul Mackerras2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
132014cf11afSPaul Mackerras	/* From now on, r24 is expected to be logical cpuid */
132114cf11afSPaul Mackerras	mr	r24,r5
132214cf11afSPaul Mackerras3:	HMT_LOW
132314cf11afSPaul Mackerras	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
132414cf11afSPaul Mackerras					/* start.			 */
132514cf11afSPaul Mackerras	sync
132614cf11afSPaul Mackerras
132714cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
132814cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
132914cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
133014cf11afSPaul Mackerras
133114cf11afSPaul Mackerras	cmpwi	0,r23,0
133214cf11afSPaul Mackerras#ifdef CONFIG_SMP
133314cf11afSPaul Mackerras	bne	.__secondary_start
133414cf11afSPaul Mackerras#endif
133514cf11afSPaul Mackerras	b 	3b			/* Loop until told to go	 */
133614cf11afSPaul Mackerras
133714cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
133814cf11afSPaul Mackerras_STATIC(__start_initialization_iSeries)
133914cf11afSPaul Mackerras	/* Clear out the BSS */
134014cf11afSPaul Mackerras	LOADADDR(r11,__bss_stop)
134114cf11afSPaul Mackerras	LOADADDR(r8,__bss_start)
134214cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
134314cf11afSPaul Mackerras	addi	r11,r11,7		/* round up to a whole # of doublewords */
134414cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
134514cf11afSPaul Mackerras	beq	4f
134614cf11afSPaul Mackerras	addi	r8,r8,-8
134714cf11afSPaul Mackerras	li	r0,0
134814cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
134914cf11afSPaul Mackerras3:	stdu	r0,8(r8)
135014cf11afSPaul Mackerras	bdnz	3b
135114cf11afSPaul Mackerras4:
135214cf11afSPaul Mackerras	LOADADDR(r1,init_thread_union)
135314cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE
135414cf11afSPaul Mackerras	li	r0,0
135514cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
135614cf11afSPaul Mackerras
135714cf11afSPaul Mackerras	LOADADDR(r3,cpu_specs)
135814cf11afSPaul Mackerras	LOADADDR(r4,cur_cpu_spec)
135914cf11afSPaul Mackerras	li	r5,0
136014cf11afSPaul Mackerras	bl	.identify_cpu
136114cf11afSPaul Mackerras
136214cf11afSPaul Mackerras	LOADADDR(r2,__toc_start)
136314cf11afSPaul Mackerras	addi	r2,r2,0x4000		/* TOC ptr = __toc_start + 0x8000 */
136414cf11afSPaul Mackerras	addi	r2,r2,0x4000		/* (addi immediate is 16-bit signed) */
136514cf11afSPaul Mackerras
136614cf11afSPaul Mackerras	bl	.iSeries_early_setup
136714cf11afSPaul Mackerras
136814cf11afSPaul Mackerras	/* relocation is on at this point */
136914cf11afSPaul Mackerras
137014cf11afSPaul Mackerras	b	.start_here_common
137114cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
137214cf11afSPaul Mackerras
137314cf11afSPaul Mackerras#ifdef CONFIG_PPC_MULTIPLATFORM
137414cf11afSPaul Mackerras
137514cf11afSPaul Mackerras_STATIC(__mmu_off)
137614cf11afSPaul Mackerras	mfmsr	r3
137714cf11afSPaul Mackerras	andi.	r0,r3,MSR_IR|MSR_DR
137814cf11afSPaul Mackerras	beqlr
137914cf11afSPaul Mackerras	andc	r3,r3,r0
138014cf11afSPaul Mackerras	mtspr	SPRN_SRR0,r4
138114cf11afSPaul Mackerras	mtspr	SPRN_SRR1,r3
138214cf11afSPaul Mackerras	sync
138314cf11afSPaul Mackerras	rfid
138414cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
138514cf11afSPaul Mackerras
138614cf11afSPaul Mackerras
138714cf11afSPaul Mackerras/*
138814cf11afSPaul Mackerras * Here is our main kernel entry point. We currently support two kinds of
138914cf11afSPaul Mackerras * entry, depending on the value of r5.
139014cf11afSPaul Mackerras *
139114cf11afSPaul Mackerras *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
139214cf11afSPaul Mackerras *                 in r3...r7
139314cf11afSPaul Mackerras *
139414cf11afSPaul Mackerras *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
139514cf11afSPaul Mackerras *                 DT block, r4 is a physical pointer to the kernel itself
139614cf11afSPaul Mackerras *
139714cf11afSPaul Mackerras */
139814cf11afSPaul Mackerras_GLOBAL(__start_initialization_multiplatform)
139914cf11afSPaul Mackerras	/*
140014cf11afSPaul Mackerras	 * Are we booted from a PROM OF-type client interface?
140114cf11afSPaul Mackerras	 */
140214cf11afSPaul Mackerras	cmpldi	cr0,r5,0
140314cf11afSPaul Mackerras	bne	.__boot_from_prom		/* yes -> prom */
140414cf11afSPaul Mackerras
140514cf11afSPaul Mackerras	/* Save parameters */
140614cf11afSPaul Mackerras	mr	r31,r3
140714cf11afSPaul Mackerras	mr	r30,r4
140814cf11afSPaul Mackerras
140914cf11afSPaul Mackerras	/* Make sure we are running in 64-bit mode */
141014cf11afSPaul Mackerras	bl	.enable_64b_mode
141114cf11afSPaul Mackerras
141214cf11afSPaul Mackerras	/* Setup some critical 970 SPRs before switching MMU off */
141314cf11afSPaul Mackerras	bl	.__970_cpu_preinit
141414cf11afSPaul Mackerras
141514cf11afSPaul Mackerras	/* cpu # */
141614cf11afSPaul Mackerras	li	r24,0
141714cf11afSPaul Mackerras
141814cf11afSPaul Mackerras	/* Switch off MMU if not already */
141914cf11afSPaul Mackerras	LOADADDR(r4, .__after_prom_start - KERNELBASE)
142014cf11afSPaul Mackerras	add	r4,r4,r30
142114cf11afSPaul Mackerras	bl	.__mmu_off
142214cf11afSPaul Mackerras	b	.__after_prom_start
142314cf11afSPaul Mackerras
142414cf11afSPaul Mackerras_STATIC(__boot_from_prom)
142514cf11afSPaul Mackerras	/* Save parameters */
142614cf11afSPaul Mackerras	mr	r31,r3
142714cf11afSPaul Mackerras	mr	r30,r4
142814cf11afSPaul Mackerras	mr	r29,r5
142914cf11afSPaul Mackerras	mr	r28,r6
143014cf11afSPaul Mackerras	mr	r27,r7
143114cf11afSPaul Mackerras
143214cf11afSPaul Mackerras	/* Make sure we are running in 64-bit mode */
143314cf11afSPaul Mackerras	bl	.enable_64b_mode
143414cf11afSPaul Mackerras
143514cf11afSPaul Mackerras	/* put a relocation offset into r3 */
143614cf11afSPaul Mackerras	bl	.reloc_offset
143714cf11afSPaul Mackerras
143814cf11afSPaul Mackerras	LOADADDR(r2,__toc_start)
143914cf11afSPaul Mackerras	addi	r2,r2,0x4000
144014cf11afSPaul Mackerras	addi	r2,r2,0x4000
144114cf11afSPaul Mackerras
144214cf11afSPaul Mackerras	/* Relocate the TOC from a virt addr to a real addr */
144314cf11afSPaul Mackerras	sub	r2,r2,r3
144414cf11afSPaul Mackerras
144514cf11afSPaul Mackerras	/* Restore parameters */
144614cf11afSPaul Mackerras	mr	r3,r31
144714cf11afSPaul Mackerras	mr	r4,r30
144814cf11afSPaul Mackerras	mr	r5,r29
144914cf11afSPaul Mackerras	mr	r6,r28
145014cf11afSPaul Mackerras	mr	r7,r27
145114cf11afSPaul Mackerras
145214cf11afSPaul Mackerras	/* Do all of the interaction with OF client interface */
145314cf11afSPaul Mackerras	bl	.prom_init
145414cf11afSPaul Mackerras	/* We never return */
145514cf11afSPaul Mackerras	trap
145614cf11afSPaul Mackerras
145714cf11afSPaul Mackerras/*
145814cf11afSPaul Mackerras * At this point, r3 contains the physical address we are running at,
145914cf11afSPaul Mackerras * returned by prom_init()
146014cf11afSPaul Mackerras */
146114cf11afSPaul Mackerras_STATIC(__after_prom_start)
146214cf11afSPaul Mackerras
146314cf11afSPaul Mackerras/*
146414cf11afSPaul Mackerras * We need to run with __start at physical address 0.
146514cf11afSPaul Mackerras * This will leave some code in the first 256B of
146614cf11afSPaul Mackerras * real memory, which are reserved for software use.
146714cf11afSPaul Mackerras * The remainder of the first page is loaded with the fixed
146814cf11afSPaul Mackerras * interrupt vectors.  The next two pages are filled with
146914cf11afSPaul Mackerras * unknown exception placeholders.
147014cf11afSPaul Mackerras *
147114cf11afSPaul Mackerras * Note: This process overwrites the OF exception vectors.
147214cf11afSPaul Mackerras *	r26 == relocation offset
147314cf11afSPaul Mackerras *	r27 == KERNELBASE
147414cf11afSPaul Mackerras */
147514cf11afSPaul Mackerras	bl	.reloc_offset
147614cf11afSPaul Mackerras	mr	r26,r3
147714cf11afSPaul Mackerras	SET_REG_TO_CONST(r27,KERNELBASE)
147814cf11afSPaul Mackerras
147914cf11afSPaul Mackerras	li	r3,0			/* target addr */
148014cf11afSPaul Mackerras
148114cf11afSPaul Mackerras	// XXX FIXME: Use phys returned by OF (r30)
148214cf11afSPaul Mackerras	sub	r4,r27,r26 		/* source addr			 */
148314cf11afSPaul Mackerras					/* current address of _start	 */
148414cf11afSPaul Mackerras					/*   i.e. where we are running	 */
148514cf11afSPaul Mackerras					/*	the source addr		 */
148614cf11afSPaul Mackerras
148714cf11afSPaul Mackerras	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
148814cf11afSPaul Mackerras	sub	r5,r5,r27
148914cf11afSPaul Mackerras
149014cf11afSPaul Mackerras	li	r6,0x100		/* Start offset, the first 0x100 */
149114cf11afSPaul Mackerras					/* bytes were copied earlier.	 */
149214cf11afSPaul Mackerras
149314cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the first n bytes	 */
149414cf11afSPaul Mackerras					/* this includes the code being	 */
149514cf11afSPaul Mackerras					/* executed here.		 */
149614cf11afSPaul Mackerras
149714cf11afSPaul Mackerras	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
149814cf11afSPaul Mackerras	mtctr	r0			/* that we just made/relocated	 */
149914cf11afSPaul Mackerras	bctr
150014cf11afSPaul Mackerras
150114cf11afSPaul Mackerras4:	LOADADDR(r5,klimit)
150214cf11afSPaul Mackerras	sub	r5,r5,r26		/* convert to the current (real) address */
150314cf11afSPaul Mackerras	ld	r5,0(r5)		/* get the value of klimit */
150414cf11afSPaul Mackerras	sub	r5,r5,r27		/* copy limit = klimit - KERNELBASE */
150514cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the rest */
150614cf11afSPaul Mackerras	b	.start_here_multiplatform
150714cf11afSPaul Mackerras
150814cf11afSPaul Mackerras#endif /* CONFIG_PPC_MULTIPLATFORM */
150914cf11afSPaul Mackerras
151014cf11afSPaul Mackerras/*
151114cf11afSPaul Mackerras * Copy routine used to copy the kernel to start at physical address 0
151214cf11afSPaul Mackerras * and flush and invalidate the caches as needed.
151314cf11afSPaul Mackerras * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
151414cf11afSPaul Mackerras * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
151514cf11afSPaul Mackerras *
151614cf11afSPaul Mackerras * Note: this routine *only* clobbers r0, r6 and lr
151714cf11afSPaul Mackerras */
151814cf11afSPaul Mackerras_GLOBAL(copy_and_flush)
151914cf11afSPaul Mackerras	addi	r5,r5,-8
152014cf11afSPaul Mackerras	addi	r6,r6,-8
152114cf11afSPaul Mackerras4:	li	r0,16			/* Use the least common		*/
152214cf11afSPaul Mackerras					/* denominator cache line	*/
152314cf11afSPaul Mackerras					/* size.  This results in	*/
152414cf11afSPaul Mackerras					/* extra cache line flushes	*/
152514cf11afSPaul Mackerras					/* but operation is correct.	*/
152614cf11afSPaul Mackerras					/* Can't get cache line size	*/
152714cf11afSPaul Mackerras					/* from NACA as it is being	*/
152814cf11afSPaul Mackerras					/* moved too.			*/
152914cf11afSPaul Mackerras
153014cf11afSPaul Mackerras	mtctr	r0			/* put # words/line in ctr	*/
153114cf11afSPaul Mackerras3:	addi	r6,r6,8			/* copy a cache line		*/
153214cf11afSPaul Mackerras	ldx	r0,r6,r4
153314cf11afSPaul Mackerras	stdx	r0,r6,r3
153414cf11afSPaul Mackerras	bdnz	3b
153514cf11afSPaul Mackerras	dcbst	r6,r3			/* write it to memory		*/
153614cf11afSPaul Mackerras	sync
153714cf11afSPaul Mackerras	icbi	r6,r3			/* flush the icache line	*/
153814cf11afSPaul Mackerras	cmpld	0,r6,r5
153914cf11afSPaul Mackerras	blt	4b
154014cf11afSPaul Mackerras	sync
154114cf11afSPaul Mackerras	addi	r5,r5,8
154214cf11afSPaul Mackerras	addi	r6,r6,8
154314cf11afSPaul Mackerras	blr
154414cf11afSPaul Mackerras
154514cf11afSPaul Mackerras.align 8
154614cf11afSPaul Mackerrascopy_to_here:
154714cf11afSPaul Mackerras
154814cf11afSPaul Mackerras#ifdef CONFIG_SMP
154914cf11afSPaul Mackerras#ifdef CONFIG_PPC_PMAC
155014cf11afSPaul Mackerras/*
155114cf11afSPaul Mackerras * On PowerMac, secondary processors start from the reset vector, which
155214cf11afSPaul Mackerras * is temporarily turned into a call to one of the functions below.
155314cf11afSPaul Mackerras */
155414cf11afSPaul Mackerras	.section ".text";
155514cf11afSPaul Mackerras	.align 2 ;
155614cf11afSPaul Mackerras
155714cf11afSPaul Mackerras	.globl	pmac_secondary_start_1
155814cf11afSPaul Mackerraspmac_secondary_start_1:
155914cf11afSPaul Mackerras	li	r24, 1
156014cf11afSPaul Mackerras	b	.pmac_secondary_start
156114cf11afSPaul Mackerras
156214cf11afSPaul Mackerras	.globl pmac_secondary_start_2
156314cf11afSPaul Mackerraspmac_secondary_start_2:
156414cf11afSPaul Mackerras	li	r24, 2
156514cf11afSPaul Mackerras	b	.pmac_secondary_start
156614cf11afSPaul Mackerras
156714cf11afSPaul Mackerras	.globl pmac_secondary_start_3
156814cf11afSPaul Mackerraspmac_secondary_start_3:
156914cf11afSPaul Mackerras	li	r24, 3
157014cf11afSPaul Mackerras	b	.pmac_secondary_start
157114cf11afSPaul Mackerras
157214cf11afSPaul Mackerras_GLOBAL(pmac_secondary_start)
157314cf11afSPaul Mackerras	/* turn on 64-bit mode */
157414cf11afSPaul Mackerras	bl	.enable_64b_mode
157514cf11afSPaul Mackerras	isync
157614cf11afSPaul Mackerras
157714cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
157814cf11afSPaul Mackerras	bl	.__restore_cpu_setup
157914cf11afSPaul Mackerras
158014cf11afSPaul Mackerras	/* pSeries does this early, though I don't think we really need it */
158114cf11afSPaul Mackerras	mfmsr	r3
158214cf11afSPaul Mackerras	ori	r3,r3,MSR_RI
158314cf11afSPaul Mackerras	mtmsrd	r3			/* RI on */
158414cf11afSPaul Mackerras
158514cf11afSPaul Mackerras	/* Set up a paca value for this processor. */
158614cf11afSPaul Mackerras	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
158714cf11afSPaul Mackerras	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
158814cf11afSPaul Mackerras	add	r13,r13,r4		/* for this processor.		*/
158914cf11afSPaul Mackerras	mtspr	SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
159014cf11afSPaul Mackerras
159114cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
159214cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
159314cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
159414cf11afSPaul Mackerras
159514cf11afSPaul Mackerras	b	.__secondary_start
159614cf11afSPaul Mackerras
159714cf11afSPaul Mackerras#endif /* CONFIG_PPC_PMAC */
159814cf11afSPaul Mackerras
159914cf11afSPaul Mackerras/*
160014cf11afSPaul Mackerras * This function is called after the master CPU has released the
160114cf11afSPaul Mackerras * secondary processors.  The execution environment is relocation off.
160214cf11afSPaul Mackerras * The paca for this processor has the following fields initialized at
160314cf11afSPaul Mackerras * this point:
160414cf11afSPaul Mackerras *   1. Processor number
160514cf11afSPaul Mackerras *   2. Segment table pointer (virtual address)
160614cf11afSPaul Mackerras * On entry the following are set:
160714cf11afSPaul Mackerras *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
160814cf11afSPaul Mackerras *   r24   = cpu# (in Linux terms)
160914cf11afSPaul Mackerras *   r13   = paca virtual address
161014cf11afSPaul Mackerras *   SPRG3 = paca virtual address
161114cf11afSPaul Mackerras */
161214cf11afSPaul Mackerras_GLOBAL(__secondary_start)
161314cf11afSPaul Mackerras
161414cf11afSPaul Mackerras	HMT_MEDIUM			/* Set thread priority to MEDIUM */
161514cf11afSPaul Mackerras
161614cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
161714cf11afSPaul Mackerras	li	r6,0
161814cf11afSPaul Mackerras	stb	r6,PACAPROCENABLED(r13)	/* interrupts soft-disabled */
161914cf11afSPaul Mackerras
162014cf11afSPaul Mackerras#ifndef CONFIG_PPC_ISERIES
162114cf11afSPaul Mackerras	/* Initialize the page table pointer register. */
162214cf11afSPaul Mackerras	LOADADDR(r6,_SDR1)
162314cf11afSPaul Mackerras	ld	r6,0(r6)		/* get the value of _SDR1	 */
162414cf11afSPaul Mackerras	mtspr	SDR1,r6			/* set the htab location	 */
162514cf11afSPaul Mackerras#endif
162614cf11afSPaul Mackerras	/* Initialize the first segment table (or SLB) entry		 */
162714cf11afSPaul Mackerras	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
162814cf11afSPaul Mackerras	bl	.stab_initialize
162914cf11afSPaul Mackerras
163014cf11afSPaul Mackerras	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
163114cf11afSPaul Mackerras	LOADADDR(r3,current_set)
163214cf11afSPaul Mackerras	sldi	r28,r24,3		/* get current_set[cpu#]	 */
163314cf11afSPaul Mackerras	ldx	r1,r3,r28
163414cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
163514cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
163614cf11afSPaul Mackerras
163714cf11afSPaul Mackerras	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
163814cf11afSPaul Mackerras	ori	r4,r3,1			/* turn on valid bit		 */
163914cf11afSPaul Mackerras
164014cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
164114cf11afSPaul Mackerras	li	r0,-1			/* hypervisor call */
164214cf11afSPaul Mackerras	li	r3,1
164314cf11afSPaul Mackerras	sldi	r3,r3,63		/* 0x8000000000000000 */
164414cf11afSPaul Mackerras	ori	r3,r3,4			/* 0x8000000000000004 */
164514cf11afSPaul Mackerras	sc				/* HvCall_setASR */
164614cf11afSPaul Mackerras#else
164714cf11afSPaul Mackerras	/* set the ASR */
164814cf11afSPaul Mackerras	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
164914cf11afSPaul Mackerras	ld	r3,0(r3)
165014cf11afSPaul Mackerras	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
165114cf11afSPaul Mackerras	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
165214cf11afSPaul Mackerras	beq	98f			/* branch if result is 0  */
165314cf11afSPaul Mackerras	mfspr	r3,PVR
165414cf11afSPaul Mackerras	srwi	r3,r3,16
165514cf11afSPaul Mackerras	cmpwi	r3,0x37			/* SStar  */
165614cf11afSPaul Mackerras	beq	97f
165714cf11afSPaul Mackerras	cmpwi	r3,0x36			/* IStar  */
165814cf11afSPaul Mackerras	beq	97f
165914cf11afSPaul Mackerras	cmpwi	r3,0x34			/* Pulsar */
166014cf11afSPaul Mackerras	bne	98f
166114cf11afSPaul Mackerras97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
166214cf11afSPaul Mackerras	HVSC				/* Invoking hcall */
166314cf11afSPaul Mackerras	b	99f
166414cf11afSPaul Mackerras98:					/* !(rpa hypervisor) || !(star)  */
166514cf11afSPaul Mackerras	mtasr	r4			/* set the stab location	 */
166614cf11afSPaul Mackerras99:
166714cf11afSPaul Mackerras#endif
166814cf11afSPaul Mackerras	li	r7,0
166914cf11afSPaul Mackerras	mtlr	r7
167014cf11afSPaul Mackerras
167114cf11afSPaul Mackerras	/* enable MMU and jump to start_secondary */
167214cf11afSPaul Mackerras	LOADADDR(r3,.start_secondary_prolog)
167314cf11afSPaul Mackerras	SET_REG_TO_CONST(r4, MSR_KERNEL)
167414cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
167514cf11afSPaul Mackerras	ori	r4,r4,MSR_EE
167614cf11afSPaul Mackerras#endif
167714cf11afSPaul Mackerras	mtspr	SRR0,r3
167814cf11afSPaul Mackerras	mtspr	SRR1,r4
167914cf11afSPaul Mackerras	rfid
168014cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
168114cf11afSPaul Mackerras
168214cf11afSPaul Mackerras/*
168314cf11afSPaul Mackerras * Running with relocation on at this point.  All we want to do is
168414cf11afSPaul Mackerras * zero the stack back-chain pointer before going into C code.
168514cf11afSPaul Mackerras */
168614cf11afSPaul Mackerras_GLOBAL(start_secondary_prolog)
168714cf11afSPaul Mackerras	li	r3,0
168814cf11afSPaul Mackerras	std	r3,0(r1)		/* Zero the stack frame pointer	*/
168914cf11afSPaul Mackerras	bl	.start_secondary
169014cf11afSPaul Mackerras#endif /* CONFIG_SMP */
169114cf11afSPaul Mackerras
169214cf11afSPaul Mackerras/*
169314cf11afSPaul Mackerras * This subroutine clobbers r11 and r12
169414cf11afSPaul Mackerras */
169514cf11afSPaul Mackerras_GLOBAL(enable_64b_mode)
169614cf11afSPaul Mackerras	mfmsr	r11			/* grab the current MSR */
169714cf11afSPaul Mackerras	li	r12,1
169814cf11afSPaul Mackerras	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)	/* r12 = MSR_SF (64-bit mode) */
169914cf11afSPaul Mackerras	or	r11,r11,r12
170014cf11afSPaul Mackerras	li	r12,1
170114cf11afSPaul Mackerras	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)	/* r12 = MSR_ISF (64-bit interrupts) */
170214cf11afSPaul Mackerras	or	r11,r11,r12
170314cf11afSPaul Mackerras	mtmsrd	r11
170414cf11afSPaul Mackerras	isync
170514cf11afSPaul Mackerras	blr
170614cf11afSPaul Mackerras
170714cf11afSPaul Mackerras#ifdef CONFIG_PPC_MULTIPLATFORM
170814cf11afSPaul Mackerras/*
170914cf11afSPaul Mackerras * This is where the main kernel code starts.
171014cf11afSPaul Mackerras */
171114cf11afSPaul Mackerras_STATIC(start_here_multiplatform)
171214cf11afSPaul Mackerras	/* get a new offset, now that the kernel has moved. */
171314cf11afSPaul Mackerras	bl	.reloc_offset
171414cf11afSPaul Mackerras	mr	r26,r3
171514cf11afSPaul Mackerras
171614cf11afSPaul Mackerras	/* Clear out the BSS. It may have been done in prom_init
171714cf11afSPaul Mackerras	 * already, but that's irrelevant since prom_init will soon
171814cf11afSPaul Mackerras	 * be detached from the kernel completely. Besides, we need
171914cf11afSPaul Mackerras	 * to clear it now for kexec-style entry.
172014cf11afSPaul Mackerras	 */
172114cf11afSPaul Mackerras	LOADADDR(r11,__bss_stop)
172214cf11afSPaul Mackerras	LOADADDR(r8,__bss_start)
172314cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
172414cf11afSPaul Mackerras	addi	r11,r11,7		/* round up to a whole # of doublewords */
172514cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
172614cf11afSPaul Mackerras	beq	4f
172714cf11afSPaul Mackerras	addi	r8,r8,-8
172814cf11afSPaul Mackerras	li	r0,0
172914cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
173014cf11afSPaul Mackerras3:	stdu	r0,8(r8)
173114cf11afSPaul Mackerras	bdnz	3b
173214cf11afSPaul Mackerras4:
173314cf11afSPaul Mackerras
173414cf11afSPaul Mackerras	mfmsr	r6
173514cf11afSPaul Mackerras	ori	r6,r6,MSR_RI
173614cf11afSPaul Mackerras	mtmsrd	r6			/* RI on */
173714cf11afSPaul Mackerras
173814cf11afSPaul Mackerras#ifdef CONFIG_HMT
173914cf11afSPaul Mackerras	/* Start up the second thread on cpu 0 */
174014cf11afSPaul Mackerras	mfspr	r3,PVR
174114cf11afSPaul Mackerras	srwi	r3,r3,16
174214cf11afSPaul Mackerras	cmpwi	r3,0x34			/* Pulsar  */
174314cf11afSPaul Mackerras	beq	90f
174414cf11afSPaul Mackerras	cmpwi	r3,0x36			/* Icestar */
174514cf11afSPaul Mackerras	beq	90f
174614cf11afSPaul Mackerras	cmpwi	r3,0x37			/* SStar   */
174714cf11afSPaul Mackerras	beq	90f
174814cf11afSPaul Mackerras	b	91f			/* HMT not supported */
174914cf11afSPaul Mackerras90:	li	r3,0
175014cf11afSPaul Mackerras	bl	.hmt_start_secondary
175114cf11afSPaul Mackerras91:
175214cf11afSPaul Mackerras#endif
175314cf11afSPaul Mackerras
175414cf11afSPaul Mackerras	/* The following sets up the stack and TOC with the registers */
175514cf11afSPaul Mackerras	/* pointing at the real (physical) address of the kernel      */
175614cf11afSPaul Mackerras	/* stack.  This is needed for the C function calls below,     */
175714cf11afSPaul Mackerras	/* which set up the htab; we have relocated the kernel but    */
175814cf11afSPaul Mackerras	/* are still running in real mode. */
175914cf11afSPaul Mackerras
176014cf11afSPaul Mackerras	LOADADDR(r3,init_thread_union)
176114cf11afSPaul Mackerras	sub	r3,r3,r26
176214cf11afSPaul Mackerras
176314cf11afSPaul Mackerras	/* set up a stack pointer (physical address) */
176414cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
176514cf11afSPaul Mackerras	li	r0,0
176614cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
176714cf11afSPaul Mackerras
176814cf11afSPaul Mackerras	/* set up the TOC (physical address) */
176914cf11afSPaul Mackerras	LOADADDR(r2,__toc_start)
177014cf11afSPaul Mackerras	addi	r2,r2,0x4000
177114cf11afSPaul Mackerras	addi	r2,r2,0x4000
177214cf11afSPaul Mackerras	sub	r2,r2,r26
177314cf11afSPaul Mackerras
177414cf11afSPaul Mackerras	LOADADDR(r3,cpu_specs)
177514cf11afSPaul Mackerras	sub	r3,r3,r26
177614cf11afSPaul Mackerras	LOADADDR(r4,cur_cpu_spec)
177714cf11afSPaul Mackerras	sub	r4,r4,r26
177814cf11afSPaul Mackerras	mr	r5,r26
177914cf11afSPaul Mackerras	bl	.identify_cpu
178014cf11afSPaul Mackerras
178114cf11afSPaul Mackerras	/* Save some low-level config HIDs of CPU0 to be copied to
178214cf11afSPaul Mackerras	 * other CPUs later on, or used for suspend/resume
178314cf11afSPaul Mackerras	 */
178414cf11afSPaul Mackerras	bl	.__save_cpu_setup
178514cf11afSPaul Mackerras	sync
178614cf11afSPaul Mackerras
178714cf11afSPaul Mackerras	/* Set up a valid physical PACA pointer in SPRG3 for early_setup.
178814cf11afSPaul Mackerras	 * Note that boot_cpuid is always 0 at this point, since nothing
178914cf11afSPaul Mackerras	 * can have initialized it differently before we reach this
179014cf11afSPaul Mackerras	 * code.
179114cf11afSPaul Mackerras	 */
179214cf11afSPaul Mackerras	LOADADDR(r27, boot_cpuid)
179314cf11afSPaul Mackerras	sub	r27,r27,r26
179414cf11afSPaul Mackerras	lwz	r27,0(r27)
179514cf11afSPaul Mackerras
179614cf11afSPaul Mackerras	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
179714cf11afSPaul Mackerras	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
179814cf11afSPaul Mackerras	add	r13,r13,r24		/* for this processor.		 */
179914cf11afSPaul Mackerras	sub	r13,r13,r26		/* convert to physical addr	 */
180014cf11afSPaul Mackerras	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */
180114cf11afSPaul Mackerras
180214cf11afSPaul Mackerras	/* Do very early kernel initializations, including initial hash table,
180314cf11afSPaul Mackerras	 * stab and slb setup before we turn on relocation.	*/
180414cf11afSPaul Mackerras
180514cf11afSPaul Mackerras	/* Restore parameters passed from prom_init/kexec */
180614cf11afSPaul Mackerras	mr	r3,r31
180714cf11afSPaul Mackerras 	bl	.early_setup
180814cf11afSPaul Mackerras
180914cf11afSPaul Mackerras	/* set the ASR */
181014cf11afSPaul Mackerras	ld	r3,PACASTABREAL(r13)
181114cf11afSPaul Mackerras	ori	r4,r3,1			/* turn on valid bit		 */
181214cf11afSPaul Mackerras	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
181314cf11afSPaul Mackerras	ld	r3,0(r3)
181414cf11afSPaul Mackerras	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
181514cf11afSPaul Mackerras	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
181614cf11afSPaul Mackerras	beq	98f			/* branch if result is 0  */
181714cf11afSPaul Mackerras	mfspr	r3,PVR
181814cf11afSPaul Mackerras	srwi	r3,r3,16
181914cf11afSPaul Mackerras	cmpwi	r3,0x37			/* SStar */
182014cf11afSPaul Mackerras	beq	97f
182114cf11afSPaul Mackerras	cmpwi	r3,0x36			/* IStar  */
182214cf11afSPaul Mackerras	beq	97f
182314cf11afSPaul Mackerras	cmpwi	r3,0x34			/* Pulsar */
182414cf11afSPaul Mackerras	bne	98f
182514cf11afSPaul Mackerras97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
182614cf11afSPaul Mackerras	HVSC				/* Invoking hcall */
182714cf11afSPaul Mackerras	b	99f
182814cf11afSPaul Mackerras98:					/* !(rpa hypervisor) || !(star) */
182914cf11afSPaul Mackerras	mtasr	r4			/* set the stab location	*/
183014cf11afSPaul Mackerras99:
183114cf11afSPaul Mackerras	/* Set SDR1 (hash table pointer) */
183214cf11afSPaul Mackerras	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
183314cf11afSPaul Mackerras	ld	r3,0(r3)
183414cf11afSPaul Mackerras	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
183514cf11afSPaul Mackerras	/* Test if bit 0 is set (LPAR bit) */
183614cf11afSPaul Mackerras	andi.	r3,r3,PLATFORM_LPAR
183714cf11afSPaul Mackerras	bne	98f			/* branch if result is !0  */
183814cf11afSPaul Mackerras	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
183914cf11afSPaul Mackerras	sub	r6,r6,r26
184014cf11afSPaul Mackerras	ld	r6,0(r6)		/* get the value of _SDR1 */
184114cf11afSPaul Mackerras	mtspr	SDR1,r6			/* set the htab location  */
184214cf11afSPaul Mackerras98:
184314cf11afSPaul Mackerras	LOADADDR(r3,.start_here_common)
184414cf11afSPaul Mackerras	SET_REG_TO_CONST(r4, MSR_KERNEL)
184514cf11afSPaul Mackerras	mtspr	SRR0,r3
184614cf11afSPaul Mackerras	mtspr	SRR1,r4
184714cf11afSPaul Mackerras	rfid
184814cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
184914cf11afSPaul Mackerras#endif /* CONFIG_PPC_MULTIPLATFORM */
185014cf11afSPaul Mackerras
185114cf11afSPaul Mackerras	/* This is where all platforms converge execution */
185214cf11afSPaul Mackerras_STATIC(start_here_common)
185314cf11afSPaul Mackerras	/* relocation is on at this point */
185414cf11afSPaul Mackerras
185514cf11afSPaul Mackerras	/* The following code sets up the SP and TOC now that we are */
185614cf11afSPaul Mackerras	/* running with translation enabled. */
185714cf11afSPaul Mackerras
185814cf11afSPaul Mackerras	LOADADDR(r3,init_thread_union)
185914cf11afSPaul Mackerras
186014cf11afSPaul Mackerras	/* set up the stack */
186114cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
186214cf11afSPaul Mackerras	li	r0,0
186314cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
186414cf11afSPaul Mackerras
186514cf11afSPaul Mackerras	/* Apply the CPU-specific fixups (nop out code sections that are
186614cf11afSPaul Mackerras	 * not relevant to this CPU).
186714cf11afSPaul Mackerras	 */
186814cf11afSPaul Mackerras	li	r3,0
186914cf11afSPaul Mackerras	bl	.do_cpu_ftr_fixups
187014cf11afSPaul Mackerras
187114cf11afSPaul Mackerras	LOADADDR(r26, boot_cpuid)
187214cf11afSPaul Mackerras	lwz	r26,0(r26)
187314cf11afSPaul Mackerras
187414cf11afSPaul Mackerras	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
187514cf11afSPaul Mackerras	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
187614cf11afSPaul Mackerras	add	r13,r13,r24		/* for this processor.		 */
187714cf11afSPaul Mackerras	mtspr	SPRG3,r13
187814cf11afSPaul Mackerras
187914cf11afSPaul Mackerras	/* ptr to current */
188014cf11afSPaul Mackerras	LOADADDR(r4,init_task)
188114cf11afSPaul Mackerras	std	r4,PACACURRENT(r13)
188214cf11afSPaul Mackerras
188314cf11afSPaul Mackerras	/* Load the TOC */
188414cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
188514cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
188614cf11afSPaul Mackerras
188714cf11afSPaul Mackerras	bl	.setup_system
188814cf11afSPaul Mackerras
188914cf11afSPaul Mackerras	/* Load up the kernel context */
189014cf11afSPaul Mackerras5:
189114cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
189214cf11afSPaul Mackerras	li	r5,0
189314cf11afSPaul Mackerras	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
189414cf11afSPaul Mackerras	mfmsr	r5
189514cf11afSPaul Mackerras	ori	r5,r5,MSR_EE		/* Hard Enabled */
189614cf11afSPaul Mackerras	mtmsrd	r5
189714cf11afSPaul Mackerras#endif
189814cf11afSPaul Mackerras
189914cf11afSPaul Mackerras	bl .start_kernel
190014cf11afSPaul Mackerras
190114cf11afSPaul Mackerras_GLOBAL(hmt_init)
190214cf11afSPaul Mackerras#ifdef CONFIG_HMT
190314cf11afSPaul Mackerras	LOADADDR(r5, hmt_thread_data)
190414cf11afSPaul Mackerras	mfspr	r7,PVR
190514cf11afSPaul Mackerras	srwi	r7,r7,16
190614cf11afSPaul Mackerras	cmpwi	r7,0x34			/* Pulsar  */
190714cf11afSPaul Mackerras	beq	90f
190814cf11afSPaul Mackerras	cmpwi	r7,0x36			/* Icestar */
190914cf11afSPaul Mackerras	beq	91f
191014cf11afSPaul Mackerras	cmpwi	r7,0x37			/* SStar   */
191114cf11afSPaul Mackerras	beq	91f
191214cf11afSPaul Mackerras	b	101f
191314cf11afSPaul Mackerras90:	mfspr	r6,PIR
191414cf11afSPaul Mackerras	andi.	r6,r6,0x1f
191514cf11afSPaul Mackerras	b	92f
191614cf11afSPaul Mackerras91:	mfspr	r6,PIR
191714cf11afSPaul Mackerras	andi.	r6,r6,0x3ff
191814cf11afSPaul Mackerras92:	sldi	r4,r24,3
191914cf11afSPaul Mackerras	stwx	r6,r5,r4
192014cf11afSPaul Mackerras	bl	.hmt_start_secondary
192114cf11afSPaul Mackerras	b	101f
192214cf11afSPaul Mackerras
192314cf11afSPaul Mackerras__hmt_secondary_hold:
192414cf11afSPaul Mackerras	LOADADDR(r5, hmt_thread_data)
192514cf11afSPaul Mackerras	clrldi	r5,r5,4
192614cf11afSPaul Mackerras	li	r7,0
192714cf11afSPaul Mackerras	mfspr	r6,PIR
192814cf11afSPaul Mackerras	mfspr	r8,PVR
192914cf11afSPaul Mackerras	srwi	r8,r8,16
193014cf11afSPaul Mackerras	cmpwi	r8,0x34
193114cf11afSPaul Mackerras	bne	93f
193214cf11afSPaul Mackerras	andi.	r6,r6,0x1f
193314cf11afSPaul Mackerras	b	103f
193414cf11afSPaul Mackerras93:	andi.	r6,r6,0x3f
193514cf11afSPaul Mackerras
193614cf11afSPaul Mackerras103:	lwzx	r8,r5,r7
193714cf11afSPaul Mackerras	cmpw	r8,r6
193814cf11afSPaul Mackerras	beq	104f
193914cf11afSPaul Mackerras	addi	r7,r7,8
194014cf11afSPaul Mackerras	b	103b
194114cf11afSPaul Mackerras
194214cf11afSPaul Mackerras104:	addi	r7,r7,4
194314cf11afSPaul Mackerras	lwzx	r9,r5,r7
194414cf11afSPaul Mackerras	mr	r24,r9
194514cf11afSPaul Mackerras101:
194614cf11afSPaul Mackerras#endif
194714cf11afSPaul Mackerras	mr	r3,r24
194814cf11afSPaul Mackerras	b	.pSeries_secondary_smp_init
194914cf11afSPaul Mackerras
195014cf11afSPaul Mackerras#ifdef CONFIG_HMT
195114cf11afSPaul Mackerras_GLOBAL(hmt_start_secondary)
195214cf11afSPaul Mackerras	LOADADDR(r4,__hmt_secondary_hold)
195314cf11afSPaul Mackerras	clrldi	r4,r4,4
195414cf11afSPaul Mackerras	mtspr	NIADORM, r4
195514cf11afSPaul Mackerras	mfspr	r4, MSRDORM
195614cf11afSPaul Mackerras	li	r5, -65
195714cf11afSPaul Mackerras	and	r4, r4, r5
195814cf11afSPaul Mackerras	mtspr	MSRDORM, r4
195914cf11afSPaul Mackerras	lis	r4,0xffef
196014cf11afSPaul Mackerras	ori	r4,r4,0x7403
196114cf11afSPaul Mackerras	mtspr	TSC, r4
196214cf11afSPaul Mackerras	li	r4,0x1f4
196314cf11afSPaul Mackerras	mtspr	TST, r4
196414cf11afSPaul Mackerras	mfspr	r4, HID0
196514cf11afSPaul Mackerras	ori	r4, r4, 0x1
196614cf11afSPaul Mackerras	mtspr	HID0, r4
196714cf11afSPaul Mackerras	mfspr	r4, SPRN_CTRLF
196814cf11afSPaul Mackerras	oris	r4, r4, 0x40
196914cf11afSPaul Mackerras	mtspr	SPRN_CTRLT, r4
197014cf11afSPaul Mackerras	blr
197114cf11afSPaul Mackerras#endif
197214cf11afSPaul Mackerras
197314cf11afSPaul Mackerras#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
197414cf11afSPaul Mackerras_GLOBAL(smp_release_cpus)
197514cf11afSPaul Mackerras	/* All secondary cpus are spinning on a common
197614cf11afSPaul Mackerras	 * spinloop, release them all now so they can start
197714cf11afSPaul Mackerras	 * to spin on their individual paca spinloops.
197814cf11afSPaul Mackerras	 * For non-SMP kernels, the secondary cpus never
197914cf11afSPaul Mackerras	 * get out of the common spinloop.
198014cf11afSPaul Mackerras	 */
198114cf11afSPaul Mackerras	li	r3,1
198214cf11afSPaul Mackerras	LOADADDR(r5,__secondary_hold_spinloop)
198314cf11afSPaul Mackerras	std	r3,0(r5)
198414cf11afSPaul Mackerras	sync
198514cf11afSPaul Mackerras	blr
198614cf11afSPaul Mackerras#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */
198714cf11afSPaul Mackerras
198814cf11afSPaul Mackerras
198914cf11afSPaul Mackerras/*
199014cf11afSPaul Mackerras * We put a few things here that have to be page-aligned.
199114cf11afSPaul Mackerras * This stuff goes at the beginning of the bss, which is page-aligned.
199214cf11afSPaul Mackerras */
199314cf11afSPaul Mackerras	.section ".bss"
199414cf11afSPaul Mackerras
199514cf11afSPaul Mackerras	.align	PAGE_SHIFT
199614cf11afSPaul Mackerras
199714cf11afSPaul Mackerras	.globl	empty_zero_page
199814cf11afSPaul Mackerrasempty_zero_page:
199914cf11afSPaul Mackerras	.space	PAGE_SIZE
200014cf11afSPaul Mackerras
200114cf11afSPaul Mackerras	.globl	swapper_pg_dir
200214cf11afSPaul Mackerrasswapper_pg_dir:
200314cf11afSPaul Mackerras	.space	PAGE_SIZE
200414cf11afSPaul Mackerras
200514cf11afSPaul Mackerras/*
200614cf11afSPaul Mackerras * This space gets a copy of optional info passed to us by the bootstrap.
200714cf11afSPaul Mackerras * Used to pass parameters into the kernel like root=/dev/sda1, etc.
200814cf11afSPaul Mackerras */
200914cf11afSPaul Mackerras	.globl	cmd_line
201014cf11afSPaul Mackerrascmd_line:
201114cf11afSPaul Mackerras	.space	COMMAND_LINE_SIZE
2012