xref: /openbmc/linux/arch/powerpc/kernel/head_64.S (revision 14cf11af6cf608eb8c23e989ddb17a715ddce109)
1*14cf11afSPaul Mackerras/*
2*14cf11afSPaul Mackerras *  arch/ppc64/kernel/head.S
3*14cf11afSPaul Mackerras *
4*14cf11afSPaul Mackerras *  PowerPC version
5*14cf11afSPaul Mackerras *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6*14cf11afSPaul Mackerras *
7*14cf11afSPaul Mackerras *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8*14cf11afSPaul Mackerras *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9*14cf11afSPaul Mackerras *  Adapted for Power Macintosh by Paul Mackerras.
10*14cf11afSPaul Mackerras *  Low-level exception handlers and MMU support
11*14cf11afSPaul Mackerras *  rewritten by Paul Mackerras.
12*14cf11afSPaul Mackerras *    Copyright (C) 1996 Paul Mackerras.
13*14cf11afSPaul Mackerras *
14*14cf11afSPaul Mackerras *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15*14cf11afSPaul Mackerras *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16*14cf11afSPaul Mackerras *
17*14cf11afSPaul Mackerras *  This file contains the low-level support and setup for the
18*14cf11afSPaul Mackerras *  PowerPC-64 platform, including trap and interrupt dispatch.
19*14cf11afSPaul Mackerras *
20*14cf11afSPaul Mackerras *  This program is free software; you can redistribute it and/or
21*14cf11afSPaul Mackerras *  modify it under the terms of the GNU General Public License
22*14cf11afSPaul Mackerras *  as published by the Free Software Foundation; either version
23*14cf11afSPaul Mackerras *  2 of the License, or (at your option) any later version.
24*14cf11afSPaul Mackerras */
25*14cf11afSPaul Mackerras
26*14cf11afSPaul Mackerras#include <linux/config.h>
27*14cf11afSPaul Mackerras#include <linux/threads.h>
28*14cf11afSPaul Mackerras#include <asm/processor.h>
29*14cf11afSPaul Mackerras#include <asm/page.h>
30*14cf11afSPaul Mackerras#include <asm/mmu.h>
31*14cf11afSPaul Mackerras#include <asm/systemcfg.h>
32*14cf11afSPaul Mackerras#include <asm/ppc_asm.h>
33*14cf11afSPaul Mackerras#include <asm/asm-offsets.h>
34*14cf11afSPaul Mackerras#include <asm/bug.h>
35*14cf11afSPaul Mackerras#include <asm/cputable.h>
36*14cf11afSPaul Mackerras#include <asm/setup.h>
37*14cf11afSPaul Mackerras#include <asm/hvcall.h>
38*14cf11afSPaul Mackerras#include <asm/iSeries/LparMap.h>
39*14cf11afSPaul Mackerras
40*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
41*14cf11afSPaul Mackerras#define DO_SOFT_DISABLE
42*14cf11afSPaul Mackerras#endif
43*14cf11afSPaul Mackerras
44*14cf11afSPaul Mackerras/*
45*14cf11afSPaul Mackerras * We layout physical memory as follows:
46*14cf11afSPaul Mackerras * 0x0000 - 0x00ff : Secondary processor spin code
47*14cf11afSPaul Mackerras * 0x0100 - 0x2fff : pSeries Interrupt prologs
48*14cf11afSPaul Mackerras * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
49*14cf11afSPaul Mackerras * 0x6000 - 0x6fff : Initial (CPU0) segment table
50*14cf11afSPaul Mackerras * 0x7000 - 0x7fff : FWNMI data area
51*14cf11afSPaul Mackerras * 0x8000 -        : Early init and support code
52*14cf11afSPaul Mackerras */
53*14cf11afSPaul Mackerras
54*14cf11afSPaul Mackerras/*
55*14cf11afSPaul Mackerras *   SPRG Usage
56*14cf11afSPaul Mackerras *
57*14cf11afSPaul Mackerras *   Register	Definition
58*14cf11afSPaul Mackerras *
59*14cf11afSPaul Mackerras *   SPRG0	reserved for hypervisor
60*14cf11afSPaul Mackerras *   SPRG1	temp - used to save gpr
61*14cf11afSPaul Mackerras *   SPRG2	temp - used to save gpr
62*14cf11afSPaul Mackerras *   SPRG3	virt addr of paca
63*14cf11afSPaul Mackerras */
64*14cf11afSPaul Mackerras
65*14cf11afSPaul Mackerras/*
66*14cf11afSPaul Mackerras * Entering into this code we make the following assumptions:
67*14cf11afSPaul Mackerras *  For pSeries:
68*14cf11afSPaul Mackerras *   1. The MMU is off & open firmware is running in real mode.
69*14cf11afSPaul Mackerras *   2. The kernel is entered at __start
70*14cf11afSPaul Mackerras *
71*14cf11afSPaul Mackerras *  For iSeries:
72*14cf11afSPaul Mackerras *   1. The MMU is on (as it always is for iSeries)
73*14cf11afSPaul Mackerras *   2. The kernel is entered at system_reset_iSeries
74*14cf11afSPaul Mackerras */
75*14cf11afSPaul Mackerras
76*14cf11afSPaul Mackerras	.text
/* _stext: very start of the kernel text image. */
77*14cf11afSPaul Mackerras	.globl  _stext
78*14cf11afSPaul Mackerras_stext:
79*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * __start: pSeries/OpenFirmware entry point (MMU off, real mode — see
 * the assumptions comment above).  The branch below sits in a feature
 * section with mask 0 / value 1, which never matches, so the feature
 * fixup pass NOPs it out — but firmware enters here *before* fixups
 * run, so the branch is still live at first entry.
 * NOTE(review): after fixups, a stray branch to 0 falls through the
 * NOPs into the trap below — presumably the intent; confirm.
 */
80*14cf11afSPaul Mackerras_GLOBAL(__start)
81*14cf11afSPaul Mackerras	/* NOP this out unconditionally */
82*14cf11afSPaul MackerrasBEGIN_FTR_SECTION
83*14cf11afSPaul Mackerras	b .__start_initialization_multiplatform
84*14cf11afSPaul MackerrasEND_FTR_SECTION(0, 1)
85*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_MULTIPLATFORM */
86*14cf11afSPaul Mackerras
87*14cf11afSPaul Mackerras	/* Catch branch to 0 in real mode */
88*14cf11afSPaul Mackerras	trap
89*14cf11afSPaul Mackerras
90*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
/*
 * Fixed-offset data area (0x20 - 0x40) read by the iSeries hypervisor
 * and LPAR debugger.  All pointers are stored KERNELBASE-relative,
 * i.e. as physical offsets into the kernel image.  The offsets of the
 * .llong slots below are load-time ABI and must not move.
 */
91*14cf11afSPaul Mackerras	/*
92*14cf11afSPaul Mackerras	 * At offset 0x20, there is a pointer to iSeries LPAR data.
93*14cf11afSPaul Mackerras	 * This is required by the hypervisor
94*14cf11afSPaul Mackerras	 */
95*14cf11afSPaul Mackerras	. = 0x20
96*14cf11afSPaul Mackerras	.llong hvReleaseData-KERNELBASE
97*14cf11afSPaul Mackerras
98*14cf11afSPaul Mackerras	/*
99*14cf11afSPaul Mackerras	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
100*14cf11afSPaul Mackerras	 * array (used by the iSeries LPAR debugger to do translation
101*14cf11afSPaul Mackerras	 * between physical addresses and absolute addresses) and
102*14cf11afSPaul Mackerras	 * to the pidhash table (also used by the debugger)
103*14cf11afSPaul Mackerras	 */
104*14cf11afSPaul Mackerras	.llong mschunks_map-KERNELBASE
105*14cf11afSPaul Mackerras	.llong 0	/* pidhash-KERNELBASE SFRXXX */
106*14cf11afSPaul Mackerras
107*14cf11afSPaul Mackerras	/* Offset 0x38 - Pointer to start of embedded System.map */
108*14cf11afSPaul Mackerras	.globl	embedded_sysmap_start
109*14cf11afSPaul Mackerrasembedded_sysmap_start:
/* Zero here means "no embedded System.map"; filled in at build/load time, presumably — confirm against the build tooling. */
110*14cf11afSPaul Mackerras	.llong	0
111*14cf11afSPaul Mackerras	/* Offset 0x40 - Pointer to end of embedded System.map */
112*14cf11afSPaul Mackerras	.globl	embedded_sysmap_end
113*14cf11afSPaul Mackerrasembedded_sysmap_end:
114*14cf11afSPaul Mackerras	.llong	0
115*14cf11afSPaul Mackerras
116*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
117*14cf11afSPaul Mackerras
/*
 * Handshake cells for secondary-CPU startup.  Both are accessed from
 * __secondary_hold below with zero-based @l addressing
 * (e.g. "std r24,__secondary_hold_acknowledge@l(0)"), so they must stay
 * within the signed 16-bit displacement reach of address 0.
 */
118*14cf11afSPaul Mackerras	/* Secondary processors spin on this value until it goes to 1. */
119*14cf11afSPaul Mackerras	.globl  __secondary_hold_spinloop
120*14cf11afSPaul Mackerras__secondary_hold_spinloop:
121*14cf11afSPaul Mackerras	.llong	0x0
122*14cf11afSPaul Mackerras
123*14cf11afSPaul Mackerras	/* Secondary processors write this value with their cpu # */
124*14cf11afSPaul Mackerras	/* after they enter the spin loop immediately below.	  */
125*14cf11afSPaul Mackerras	.globl	__secondary_hold_acknowledge
126*14cf11afSPaul Mackerras__secondary_hold_acknowledge:
127*14cf11afSPaul Mackerras	.llong	0x0
128*14cf11afSPaul Mackerras
129*14cf11afSPaul Mackerras	. = 0x60
130*14cf11afSPaul Mackerras/*
131*14cf11afSPaul Mackerras * The following code is used on pSeries to hold secondary processors
132*14cf11afSPaul Mackerras * in a spin loop after they have been freed from OpenFirmware, but
133*14cf11afSPaul Mackerras * before the bulk of the kernel has been relocated.  This code
134*14cf11afSPaul Mackerras * is relocated to physical address 0x60 before prom_init is run.
135*14cf11afSPaul Mackerras * All of it must fit below the first exception vector at 0x100.
136*14cf11afSPaul Mackerras */
/* In: r3 = this cpu's linux cpu number (held in r24 for the duration). */
137*14cf11afSPaul Mackerras_GLOBAL(__secondary_hold)
/* Turn MSR_RI (recoverable interrupt) on so an exception here is not fatal. */
138*14cf11afSPaul Mackerras	mfmsr	r24
139*14cf11afSPaul Mackerras	ori	r24,r24,MSR_RI
140*14cf11afSPaul Mackerras	mtmsrd	r24			/* RI on */
141*14cf11afSPaul Mackerras
142*14cf11afSPaul Mackerras	/* Grab our linux cpu number */
143*14cf11afSPaul Mackerras	mr	r24,r3
144*14cf11afSPaul Mackerras
145*14cf11afSPaul Mackerras	/* Tell the master cpu we're here */
146*14cf11afSPaul Mackerras	/* Relocation is off & we are located at an address less */
147*14cf11afSPaul Mackerras	/* than 0x100, so only need to grab low order offset.    */
148*14cf11afSPaul Mackerras	std	r24,__secondary_hold_acknowledge@l(0)
/* sync orders the acknowledge store before the spin-loop loads below. */
149*14cf11afSPaul Mackerras	sync
150*14cf11afSPaul Mackerras
151*14cf11afSPaul Mackerras	/* All secondary cpus wait here until told to start. */
152*14cf11afSPaul Mackerras100:	ld	r4,__secondary_hold_spinloop@l(0)
153*14cf11afSPaul Mackerras	cmpdi	0,r4,1
154*14cf11afSPaul Mackerras	bne	100b
155*14cf11afSPaul Mackerras
156*14cf11afSPaul Mackerras#ifdef CONFIG_HMT
157*14cf11afSPaul Mackerras	b	.hmt_init
158*14cf11afSPaul Mackerras#else
159*14cf11afSPaul Mackerras#ifdef CONFIG_SMP
/* Hand the cpu number back in r3, as pSeries_secondary_smp_init expects its argument there. */
160*14cf11afSPaul Mackerras	mr	r3,r24
161*14cf11afSPaul Mackerras	b	.pSeries_secondary_smp_init
162*14cf11afSPaul Mackerras#else
/* A secondary cpu released on a non-SMP kernel has nowhere to go: die loudly. */
163*14cf11afSPaul Mackerras	BUG_OPCODE
164*14cf11afSPaul Mackerras#endif
165*14cf11afSPaul Mackerras#endif
166*14cf11afSPaul Mackerras
167*14cf11afSPaul Mackerras/* This value is used to mark exception frames on the stack. */
168*14cf11afSPaul Mackerras	.section ".toc","aw"
169*14cf11afSPaul Mackerrasexception_marker:
/* 0x7265677368657265 is ASCII "regshere"; EXCEPTION_PROLOG_COMMON stores it at STACK_FRAME_OVERHEAD-16 so frame walkers can recognize exception frames. */
170*14cf11afSPaul Mackerras	.tc	ID_72656773_68657265[TC],0x7265677368657265
171*14cf11afSPaul Mackerras	.text
172*14cf11afSPaul Mackerras
173*14cf11afSPaul Mackerras/*
174*14cf11afSPaul Mackerras * The following macros define the code that appears as
175*14cf11afSPaul Mackerras * the prologue to each of the exception handlers.  They
176*14cf11afSPaul Mackerras * are split into two parts to allow a single kernel binary
177*14cf11afSPaul Mackerras * to be used for pSeries and iSeries.
178*14cf11afSPaul Mackerras * LOL.  One day... - paulus
179*14cf11afSPaul Mackerras */
180*14cf11afSPaul Mackerras
181*14cf11afSPaul Mackerras/*
182*14cf11afSPaul Mackerras * We make as much of the exception code common between native
183*14cf11afSPaul Mackerras * exception handlers (including pSeries LPAR) and iSeries LPAR
184*14cf11afSPaul Mackerras * implementations as possible.
185*14cf11afSPaul Mackerras */
186*14cf11afSPaul Mackerras
187*14cf11afSPaul Mackerras/*
188*14cf11afSPaul Mackerras * This is the start of the interrupt handlers for pSeries
189*14cf11afSPaul Mackerras * This code runs with relocation off.
190*14cf11afSPaul Mackerras */
/*
 * Byte offsets into the per-exception save areas hung off the paca
 * (PACA_EXGEN / PACA_EXMC / PACA_EXSLB, used as `area` below).
 * Note the deliberate aliases: SLB-miss paths reuse the SRR0 slot for
 * r3 and the DAR slot for LR, as the inline comments state.
 */
191*14cf11afSPaul Mackerras#define EX_R9		0
192*14cf11afSPaul Mackerras#define EX_R10		8
193*14cf11afSPaul Mackerras#define EX_R11		16
194*14cf11afSPaul Mackerras#define EX_R12		24
195*14cf11afSPaul Mackerras#define EX_R13		32
196*14cf11afSPaul Mackerras#define EX_SRR0		40
197*14cf11afSPaul Mackerras#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
198*14cf11afSPaul Mackerras#define EX_DAR		48
199*14cf11afSPaul Mackerras#define EX_LR		48	/* SLB miss saves LR, but not DAR */
200*14cf11afSPaul Mackerras#define EX_DSISR	56
201*14cf11afSPaul Mackerras#define EX_CCR		60
202*14cf11afSPaul Mackerras
/*
 * EXCEPTION_PROLOG_PSERIES: first-level prolog, entered in real mode
 * with the interrupted r13 already stashed in SPRG1 by the vector stub.
 * Saves r9-r13 into `area` in the paca, captures CR in r9 and SRR0/SRR1
 * in r11/r12, then rfid's to the virtual-mode handler `label` with
 * MSR_IR/DR/RI set.
 * NOTE(review): the handler's high address bits are derived from the
 * paca pointer (clrrdi r13,32) — this relies on paca and handler living
 * in the same upper-32-bit region; confirm against KERNELBASE layout.
 */
203*14cf11afSPaul Mackerras#define EXCEPTION_PROLOG_PSERIES(area, label)				\
204*14cf11afSPaul Mackerras	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
205*14cf11afSPaul Mackerras	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
206*14cf11afSPaul Mackerras	std	r10,area+EX_R10(r13);					\
207*14cf11afSPaul Mackerras	std	r11,area+EX_R11(r13);					\
208*14cf11afSPaul Mackerras	std	r12,area+EX_R12(r13);					\
209*14cf11afSPaul Mackerras	mfspr	r9,SPRG1;						\
210*14cf11afSPaul Mackerras	std	r9,area+EX_R13(r13);					\
211*14cf11afSPaul Mackerras	mfcr	r9;							\
212*14cf11afSPaul Mackerras	clrrdi	r12,r13,32;		/* get high part of &label */	\
213*14cf11afSPaul Mackerras	mfmsr	r10;							\
214*14cf11afSPaul Mackerras	mfspr	r11,SRR0;		/* save SRR0 */			\
215*14cf11afSPaul Mackerras	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
216*14cf11afSPaul Mackerras	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
217*14cf11afSPaul Mackerras	mtspr	SRR0,r12;						\
218*14cf11afSPaul Mackerras	mfspr	r12,SRR1;		/* and SRR1 */			\
219*14cf11afSPaul Mackerras	mtspr	SRR1,r10;						\
220*14cf11afSPaul Mackerras	rfid;								\
221*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
222*14cf11afSPaul Mackerras
223*14cf11afSPaul Mackerras/*
224*14cf11afSPaul Mackerras * This is the start of the interrupt handlers for iSeries
225*14cf11afSPaul Mackerras * This code runs with relocation on.
226*14cf11afSPaul Mackerras */
/*
 * ISERIES_1: save r9-r13 and CR exactly as the pSeries prolog does
 * (r13 comes back from SPRG1, paca from SPRG3).  No rfid is needed
 * because iSeries already runs with relocation on.
 */
227*14cf11afSPaul Mackerras#define EXCEPTION_PROLOG_ISERIES_1(area)				\
228*14cf11afSPaul Mackerras	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
229*14cf11afSPaul Mackerras	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
230*14cf11afSPaul Mackerras	std	r10,area+EX_R10(r13);					\
231*14cf11afSPaul Mackerras	std	r11,area+EX_R11(r13);					\
232*14cf11afSPaul Mackerras	std	r12,area+EX_R12(r13);					\
233*14cf11afSPaul Mackerras	mfspr	r9,SPRG1;						\
234*14cf11afSPaul Mackerras	std	r9,area+EX_R13(r13);					\
235*14cf11afSPaul Mackerras	mfcr	r9
236*14cf11afSPaul Mackerras
/*
 * ISERIES_2: load the interrupted SRR0/SRR1 into r11/r12 from the
 * hypervisor-maintained lppaca (matching what the pSeries prolog
 * reads from the real SRRs), then set MSR_RI so we are recoverable.
 */
237*14cf11afSPaul Mackerras#define EXCEPTION_PROLOG_ISERIES_2					\
238*14cf11afSPaul Mackerras	mfmsr	r10;							\
239*14cf11afSPaul Mackerras	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
240*14cf11afSPaul Mackerras	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
241*14cf11afSPaul Mackerras	ori	r10,r10,MSR_RI;						\
242*14cf11afSPaul Mackerras	mtmsrd	r10,1
243*14cf11afSPaul Mackerras
244*14cf11afSPaul Mackerras/*
245*14cf11afSPaul Mackerras * The common exception prolog is used for all except a few exceptions
246*14cf11afSPaul Mackerras * such as a segment miss on a kernel address.  We have to be prepared
247*14cf11afSPaul Mackerras * to take another exception from the point where we first touch the
248*14cf11afSPaul Mackerras * kernel stack onwards.
249*14cf11afSPaul Mackerras *
250*14cf11afSPaul Mackerras * On entry r13 points to the paca, r9-r13 are saved in the paca,
251*14cf11afSPaul Mackerras * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
252*14cf11afSPaul Mackerras * SRR1, and relocation is on.
253*14cf11afSPaul Mackerras */
/*
 * Build a full struct pt_regs frame on the kernel stack from the state
 * the first-level prolog left in the paca `area` and in r9/r11/r12.
 * If we came from user mode (MSR_PR set in r12) the kernel stack is
 * fetched from PACAKSAVE; otherwise we extend the current stack.
 * NOTE(review): the trap number is stored as (n)+1 — the set low bit
 * presumably marks "non-volatile GPRs not yet saved" (cleared by
 * save_nvgprs / tested by FULL_REGS); confirm against ptrace.h.
 */
254*14cf11afSPaul Mackerras#define EXCEPTION_PROLOG_COMMON(n, area)				   \
255*14cf11afSPaul Mackerras	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
256*14cf11afSPaul Mackerras	mr	r10,r1;			/* Save r1			*/ \
257*14cf11afSPaul Mackerras	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
258*14cf11afSPaul Mackerras	beq-	1f;							   \
259*14cf11afSPaul Mackerras	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
260*14cf11afSPaul Mackerras1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
261*14cf11afSPaul Mackerras	bge-	cr1,bad_stack;		/* abort if it is		*/ \
262*14cf11afSPaul Mackerras	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
263*14cf11afSPaul Mackerras	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
264*14cf11afSPaul Mackerras	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
265*14cf11afSPaul Mackerras	std	r10,0(r1);		/* make stack chain pointer	*/ \
266*14cf11afSPaul Mackerras	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
267*14cf11afSPaul Mackerras	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
268*14cf11afSPaul Mackerras	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
269*14cf11afSPaul Mackerras	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
270*14cf11afSPaul Mackerras	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
271*14cf11afSPaul Mackerras	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
272*14cf11afSPaul Mackerras	ld	r10,area+EX_R10(r13);					   \
273*14cf11afSPaul Mackerras	std	r9,GPR9(r1);						   \
274*14cf11afSPaul Mackerras	std	r10,GPR10(r1);						   \
275*14cf11afSPaul Mackerras	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
276*14cf11afSPaul Mackerras	ld	r10,area+EX_R12(r13);					   \
277*14cf11afSPaul Mackerras	ld	r11,area+EX_R13(r13);					   \
278*14cf11afSPaul Mackerras	std	r9,GPR11(r1);						   \
279*14cf11afSPaul Mackerras	std	r10,GPR12(r1);						   \
280*14cf11afSPaul Mackerras	std	r11,GPR13(r1);						   \
281*14cf11afSPaul Mackerras	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
282*14cf11afSPaul Mackerras	mflr	r9;			/* save LR in stackframe	*/ \
283*14cf11afSPaul Mackerras	std	r9,_LINK(r1);						   \
284*14cf11afSPaul Mackerras	mfctr	r10;			/* save CTR in stackframe	*/ \
285*14cf11afSPaul Mackerras	std	r10,_CTR(r1);						   \
286*14cf11afSPaul Mackerras	mfspr	r11,XER;		/* save XER in stackframe	*/ \
287*14cf11afSPaul Mackerras	std	r11,_XER(r1);						   \
288*14cf11afSPaul Mackerras	li	r9,(n)+1;						   \
289*14cf11afSPaul Mackerras	std	r9,_TRAP(r1);		/* set trap number		*/ \
290*14cf11afSPaul Mackerras	li	r10,0;							   \
291*14cf11afSPaul Mackerras	ld	r11,exception_marker@toc(r2);				   \
292*14cf11afSPaul Mackerras	std	r10,RESULT(r1);		/* clear regs->result		*/ \
293*14cf11afSPaul Mackerras	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
294*14cf11afSPaul Mackerras
295*14cf11afSPaul Mackerras/*
296*14cf11afSPaul Mackerras * Exception vectors.
297*14cf11afSPaul Mackerras */
/*
 * Place a standard pSeries exception vector at fixed offset `n`:
 * stash r13 in SPRG1, then run the real-mode prolog which rfid's
 * to label##_common in virtual mode.
 */
298*14cf11afSPaul Mackerras#define STD_EXCEPTION_PSERIES(n, label)			\
299*14cf11afSPaul Mackerras	. = n;						\
300*14cf11afSPaul Mackerras	.globl label##_pSeries;				\
301*14cf11afSPaul Mackerraslabel##_pSeries:					\
302*14cf11afSPaul Mackerras	HMT_MEDIUM;					\
303*14cf11afSPaul Mackerras	mtspr	SPRG1,r13;		/* save r13 */	\
304*14cf11afSPaul Mackerras	RUNLATCH_ON(r13);				\
305*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
306*14cf11afSPaul Mackerras
/*
 * iSeries equivalent: no `. = n` placement (the hypervisor dispatches
 * here), no rfid (relocation already on) — just save state and branch
 * to the shared label##_common handler.
 */
307*14cf11afSPaul Mackerras#define STD_EXCEPTION_ISERIES(n, label, area)		\
308*14cf11afSPaul Mackerras	.globl label##_iSeries;				\
309*14cf11afSPaul Mackerraslabel##_iSeries:					\
310*14cf11afSPaul Mackerras	HMT_MEDIUM;					\
311*14cf11afSPaul Mackerras	mtspr	SPRG1,r13;		/* save r13 */	\
312*14cf11afSPaul Mackerras	RUNLATCH_ON(r13);				\
313*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_1(area);		\
314*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_2;			\
315*14cf11afSPaul Mackerras	b	label##_common
316*14cf11afSPaul Mackerras
/*
 * iSeries maskable exceptions honour soft-disable: if
 * PACAPROCENABLED == 0 the interrupt is diverted to
 * label##_iSeries_masked instead of being delivered.
 */
317*14cf11afSPaul Mackerras#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
318*14cf11afSPaul Mackerras	.globl label##_iSeries;						\
319*14cf11afSPaul Mackerraslabel##_iSeries:							\
320*14cf11afSPaul Mackerras	HMT_MEDIUM;							\
321*14cf11afSPaul Mackerras	mtspr	SPRG1,r13;		/* save r13 */			\
322*14cf11afSPaul Mackerras	RUNLATCH_ON(r13);						\
323*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
324*14cf11afSPaul Mackerras	lbz	r10,PACAPROCENABLED(r13);				\
325*14cf11afSPaul Mackerras	cmpwi	0,r10,0;						\
326*14cf11afSPaul Mackerras	beq-	label##_iSeries_masked;					\
327*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_2;					\
328*14cf11afSPaul Mackerras	b	label##_common;						\
329*14cf11afSPaul Mackerras
/* NOTE(review): the trailing backslash on the final macro line above continues the definition onto the blank line — harmless, but fragile if code is ever added directly after it. */
330*14cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
/*
 * Soft-disable variant (iSeries): record the previous soft-enable
 * state in the frame (SOFTE), mark the cpu soft-disabled via
 * PACAPROCENABLED = 0, and note that MSR_EE is turned *on* — under
 * soft-disable, hardware interrupts stay enabled and the
 * soft-disabled state is checked by the maskable-exception prologs.
 */
331*14cf11afSPaul Mackerras#define DISABLE_INTS				\
332*14cf11afSPaul Mackerras	lbz	r10,PACAPROCENABLED(r13);	\
333*14cf11afSPaul Mackerras	li	r11,0;				\
334*14cf11afSPaul Mackerras	std	r10,SOFTE(r1);			\
335*14cf11afSPaul Mackerras	mfmsr	r10;				\
336*14cf11afSPaul Mackerras	stb	r11,PACAPROCENABLED(r13);	\
337*14cf11afSPaul Mackerras	ori	r10,r10,MSR_EE;			\
338*14cf11afSPaul Mackerras	mtmsrd	r10,1
339*14cf11afSPaul Mackerras
/* Soft-enable: save current soft state into the frame and set MSR_EE. */
340*14cf11afSPaul Mackerras#define ENABLE_INTS				\
341*14cf11afSPaul Mackerras	lbz	r10,PACAPROCENABLED(r13);	\
342*14cf11afSPaul Mackerras	mfmsr	r11;				\
343*14cf11afSPaul Mackerras	std	r10,SOFTE(r1);			\
344*14cf11afSPaul Mackerras	ori	r11,r11,MSR_EE;			\
345*14cf11afSPaul Mackerras	mtmsrd	r11,1
346*14cf11afSPaul Mackerras
347*14cf11afSPaul Mackerras#else	/* hard enable/disable interrupts */
/* Hard variant: interrupts are already off on exception entry, so DISABLE_INTS is a no-op. */
348*14cf11afSPaul Mackerras#define DISABLE_INTS
349*14cf11afSPaul Mackerras
/* Hard enable: copy the interrupted context's MSR_EE bit (from the saved _MSR) into the live MSR. */
350*14cf11afSPaul Mackerras#define ENABLE_INTS				\
351*14cf11afSPaul Mackerras	ld	r12,_MSR(r1);			\
352*14cf11afSPaul Mackerras	mfmsr	r11;				\
353*14cf11afSPaul Mackerras	rlwimi	r11,r12,0,MSR_EE;		\
354*14cf11afSPaul Mackerras	mtmsrd	r11,1
355*14cf11afSPaul Mackerras
356*14cf11afSPaul Mackerras#endif
357*14cf11afSPaul Mackerras
/*
 * Second-level handler: build the pt_regs frame, save the non-volatile
 * GPRs, then call the C handler `hdlr` with r3 = &regs and return via
 * ret_from_except.
 */
358*14cf11afSPaul Mackerras#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
359*14cf11afSPaul Mackerras	.align	7;					\
360*14cf11afSPaul Mackerras	.globl label##_common;				\
361*14cf11afSPaul Mackerraslabel##_common:						\
362*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
363*14cf11afSPaul Mackerras	DISABLE_INTS;					\
364*14cf11afSPaul Mackerras	bl	.save_nvgprs;				\
365*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
366*14cf11afSPaul Mackerras	bl	hdlr;					\
367*14cf11afSPaul Mackerras	b	.ret_from_except
368*14cf11afSPaul Mackerras
/*
 * "Lite" variant for fast-path handlers: skips save_nvgprs (the C
 * handler must not need r14-r31) and returns via ret_from_except_lite.
 */
369*14cf11afSPaul Mackerras#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
370*14cf11afSPaul Mackerras	.align	7;					\
371*14cf11afSPaul Mackerras	.globl label##_common;				\
372*14cf11afSPaul Mackerraslabel##_common:						\
373*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
374*14cf11afSPaul Mackerras	DISABLE_INTS;					\
375*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
376*14cf11afSPaul Mackerras	bl	hdlr;					\
377*14cf11afSPaul Mackerras	b	.ret_from_except_lite
378*14cf11afSPaul Mackerras
379*14cf11afSPaul Mackerras/*
380*14cf11afSPaul Mackerras * Start of pSeries system interrupt routines
381*14cf11afSPaul Mackerras */
/* Architected exception vectors begin at fixed physical offset 0x100. */
382*14cf11afSPaul Mackerras	. = 0x100
383*14cf11afSPaul Mackerras	.globl __start_interrupts
384*14cf11afSPaul Mackerras__start_interrupts:
385*14cf11afSPaul Mackerras
386*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x100, system_reset)
387*14cf11afSPaul Mackerras
388*14cf11afSPaul Mackerras	. = 0x200
/* Machine check is open-coded rather than using STD_EXCEPTION_PSERIES because it needs its own PACA_EXMC save area (a machine check can interrupt another exception prolog). Note this label is deliberately not .globl, unlike the macro-generated vectors. */
389*14cf11afSPaul Mackerras_machine_check_pSeries:
390*14cf11afSPaul Mackerras	HMT_MEDIUM
391*14cf11afSPaul Mackerras	mtspr	SPRG1,r13		/* save r13 */
392*14cf11afSPaul Mackerras	RUNLATCH_ON(r13)
393*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
394*14cf11afSPaul Mackerras
395*14cf11afSPaul Mackerras	. = 0x300
396*14cf11afSPaul Mackerras	.globl data_access_pSeries
397*14cf11afSPaul Mackerrasdata_access_pSeries:
398*14cf11afSPaul Mackerras	HMT_MEDIUM
399*14cf11afSPaul Mackerras	mtspr	SPRG1,r13
/*
 * Fast path for CPUs without an SLB (section kept only when
 * CPU_FTR_SLB is clear): detect a segment-table miss on a kernel
 * region address and divert it to .do_stab_bolted.
 * r13 = DAR >> 60 (address region nibble) with one DSISR bit inserted
 * by the rlwimi; 0x2c = region 0xC (kernel linear map) with that bit
 * set.  CR field 0 is saved in r12 and restored (mtcrf 0x80) on the
 * not-taken path before falling through to the standard prolog.
 */
400*14cf11afSPaul MackerrasBEGIN_FTR_SECTION
401*14cf11afSPaul Mackerras	mtspr	SPRG2,r12
402*14cf11afSPaul Mackerras	mfspr	r13,DAR
403*14cf11afSPaul Mackerras	mfspr	r12,DSISR
404*14cf11afSPaul Mackerras	srdi	r13,r13,60
405*14cf11afSPaul Mackerras	rlwimi	r13,r12,16,0x20
406*14cf11afSPaul Mackerras	mfcr	r12
407*14cf11afSPaul Mackerras	cmpwi	r13,0x2c
408*14cf11afSPaul Mackerras	beq	.do_stab_bolted_pSeries
409*14cf11afSPaul Mackerras	mtcrf	0x80,r12
410*14cf11afSPaul Mackerras	mfspr	r12,SPRG2
411*14cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
412*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
413*14cf11afSPaul Mackerras
414*14cf11afSPaul Mackerras	. = 0x380
/*
 * Data SLB miss: cannot use the standard prolog because the handler
 * runs in real mode and needs the faulting address (DAR) in r3.
 * r3 is saved in the PACA_EXSLB area's EX_R3 slot (aliasing EX_SRR0),
 * SRR1 is carried in r12, and .do_slb_miss is reached with a relative
 * branch so it works with relocation off.
 */
415*14cf11afSPaul Mackerras	.globl data_access_slb_pSeries
416*14cf11afSPaul Mackerrasdata_access_slb_pSeries:
417*14cf11afSPaul Mackerras	HMT_MEDIUM
418*14cf11afSPaul Mackerras	mtspr	SPRG1,r13
419*14cf11afSPaul Mackerras	RUNLATCH_ON(r13)
420*14cf11afSPaul Mackerras	mfspr	r13,SPRG3		/* get paca address into r13 */
421*14cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
422*14cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_R10(r13)
423*14cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_R11(r13)
424*14cf11afSPaul Mackerras	std	r12,PACA_EXSLB+EX_R12(r13)
425*14cf11afSPaul Mackerras	std	r3,PACA_EXSLB+EX_R3(r13)
426*14cf11afSPaul Mackerras	mfspr	r9,SPRG1
427*14cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R13(r13)
428*14cf11afSPaul Mackerras	mfcr	r9
429*14cf11afSPaul Mackerras	mfspr	r12,SRR1		/* and SRR1 */
430*14cf11afSPaul Mackerras	mfspr	r3,DAR
431*14cf11afSPaul Mackerras	b	.do_slb_miss		/* Rel. branch works in real mode */
432*14cf11afSPaul Mackerras
433*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x400, instruction_access)
434*14cf11afSPaul Mackerras
435*14cf11afSPaul Mackerras	. = 0x480
/*
 * Instruction SLB miss: identical save sequence to the data SLB miss
 * at 0x380, except the faulting address comes from SRR0 (the fetch
 * address) rather than DAR.
 */
436*14cf11afSPaul Mackerras	.globl instruction_access_slb_pSeries
437*14cf11afSPaul Mackerrasinstruction_access_slb_pSeries:
438*14cf11afSPaul Mackerras	HMT_MEDIUM
439*14cf11afSPaul Mackerras	mtspr	SPRG1,r13
440*14cf11afSPaul Mackerras	RUNLATCH_ON(r13)
441*14cf11afSPaul Mackerras	mfspr	r13,SPRG3		/* get paca address into r13 */
442*14cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
443*14cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_R10(r13)
444*14cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_R11(r13)
445*14cf11afSPaul Mackerras	std	r12,PACA_EXSLB+EX_R12(r13)
446*14cf11afSPaul Mackerras	std	r3,PACA_EXSLB+EX_R3(r13)
447*14cf11afSPaul Mackerras	mfspr	r9,SPRG1
448*14cf11afSPaul Mackerras	std	r9,PACA_EXSLB+EX_R13(r13)
449*14cf11afSPaul Mackerras	mfcr	r9
450*14cf11afSPaul Mackerras	mfspr	r12,SRR1		/* and SRR1 */
451*14cf11afSPaul Mackerras	mfspr	r3,SRR0			/* SRR0 is faulting address */
452*14cf11afSPaul Mackerras	b	.do_slb_miss		/* Rel. branch works in real mode */
453*14cf11afSPaul Mackerras
/* Remaining architected vectors (0x500 - 0xb00), all via the generic PACA_EXGEN prolog. */
454*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
455*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x600, alignment)
456*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x700, program_check)
457*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
458*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x900, decrementer)
459*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
460*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
461*14cf11afSPaul Mackerras
462*14cf11afSPaul Mackerras	. = 0xc00
/*
 * System call entry.  Open-coded instead of using the standard prolog:
 * no paca save area is used; the caller's r13 is kept in r9
 * (presumably consumed by system_call_common — confirm there).
 * The full 32-bit low half of system_call_common's address is built
 * with oris+ori on top of the paca-derived upper bits, then rfid
 * switches to virtual mode.
 */
463*14cf11afSPaul Mackerras	.globl	system_call_pSeries
464*14cf11afSPaul Mackerrassystem_call_pSeries:
465*14cf11afSPaul Mackerras	HMT_MEDIUM
466*14cf11afSPaul Mackerras	RUNLATCH_ON(r9)
467*14cf11afSPaul Mackerras	mr	r9,r13
468*14cf11afSPaul Mackerras	mfmsr	r10
469*14cf11afSPaul Mackerras	mfspr	r13,SPRG3
470*14cf11afSPaul Mackerras	mfspr	r11,SRR0
471*14cf11afSPaul Mackerras	clrrdi	r12,r13,32
472*14cf11afSPaul Mackerras	oris	r12,r12,system_call_common@h
473*14cf11afSPaul Mackerras	ori	r12,r12,system_call_common@l
474*14cf11afSPaul Mackerras	mtspr	SRR0,r12
475*14cf11afSPaul Mackerras	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
476*14cf11afSPaul Mackerras	mfspr	r12,SRR1
477*14cf11afSPaul Mackerras	mtspr	SRR1,r10
478*14cf11afSPaul Mackerras	rfid
479*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
480*14cf11afSPaul Mackerras
481*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xd00, single_step)
482*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
483*14cf11afSPaul Mackerras
484*14cf11afSPaul Mackerras	/* We need to deal with the Altivec unavailable exception
485*14cf11afSPaul Mackerras	 * here which is at 0xf20, thus in the middle of the
486*14cf11afSPaul Mackerras	 * prolog code of the PerformanceMonitor one. A little
487*14cf11afSPaul Mackerras	 * trickery is thus necessary
488*14cf11afSPaul Mackerras	 */
489*14cf11afSPaul Mackerras	. = 0xf00
/* Only a branch fits at 0xf00; the real performance-monitor prolog lives past 0x3000 (see "moved from 0xf00" below). */
490*14cf11afSPaul Mackerras	b	performance_monitor_pSeries
491*14cf11afSPaul Mackerras
492*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
493*14cf11afSPaul Mackerras
494*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
495*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
496*14cf11afSPaul Mackerras
/* End of the fixed-offset vector region (0x0100 - 0x2fff per the layout comment at the top of the file). */
497*14cf11afSPaul Mackerras	. = 0x3000
498*14cf11afSPaul Mackerras
499*14cf11afSPaul Mackerras/*** pSeries interrupt support ***/
500*14cf11afSPaul Mackerras
501*14cf11afSPaul Mackerras	/* moved from 0xf00 */
502*14cf11afSPaul Mackerras	STD_EXCEPTION_PSERIES(., performance_monitor)
503*14cf11afSPaul Mackerras
504*14cf11afSPaul Mackerras	.align	7
/*
 * Landing point for the STAB fast path in data_access_pSeries (0x300):
 * undo its scratch usage — restore CR field 0 from r12 and reload the
 * real r12 from SPRG2 — then run the standard prolog using the
 * dedicated PACA_EXSLB save area into .do_stab_bolted.
 */
505*14cf11afSPaul Mackerras_GLOBAL(do_stab_bolted_pSeries)
506*14cf11afSPaul Mackerras	mtcrf	0x80,r12
507*14cf11afSPaul Mackerras	mfspr	r12,SPRG2
508*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
509*14cf11afSPaul Mackerras
510*14cf11afSPaul Mackerras/*
511*14cf11afSPaul Mackerras * Vectors for the FWNMI option.  Share common code.
512*14cf11afSPaul Mackerras */
/*
 * Firmware-assisted NMI entry points: same prologs as the architected
 * 0x100/0x200 vectors, but at addresses registered with firmware
 * (placed in the 0x7000-0x7fff FWNMI area per the layout comment).
 * NOTE(review): this section is indented with spaces, unlike the rest
 * of the file's tabs — cosmetic only, left untouched here.
 */
513*14cf11afSPaul Mackerras      .globl system_reset_fwnmi
514*14cf11afSPaul Mackerrassystem_reset_fwnmi:
515*14cf11afSPaul Mackerras      HMT_MEDIUM
516*14cf11afSPaul Mackerras      mtspr   SPRG1,r13               /* save r13 */
517*14cf11afSPaul Mackerras      RUNLATCH_ON(r13)
518*14cf11afSPaul Mackerras      EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
519*14cf11afSPaul Mackerras
520*14cf11afSPaul Mackerras      .globl machine_check_fwnmi
521*14cf11afSPaul Mackerrasmachine_check_fwnmi:
522*14cf11afSPaul Mackerras      HMT_MEDIUM
523*14cf11afSPaul Mackerras      mtspr   SPRG1,r13               /* save r13 */
524*14cf11afSPaul Mackerras      RUNLATCH_ON(r13)
525*14cf11afSPaul Mackerras      EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
526*14cf11afSPaul Mackerras
527*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
528*14cf11afSPaul Mackerras/***  ISeries-LPAR interrupt handlers ***/
529*14cf11afSPaul Mackerras
530*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
531*14cf11afSPaul Mackerras
/*
 * iSeries data access: mirrors the pSeries 0x300 vector, including the
 * STAB fast path (kept only when CPU_FTR_SLB is clear) that diverts
 * kernel-region segment faults to .do_stab_bolted_iSeries.  See the
 * pSeries version for the r13/DSISR/0x2c decoding; SRR0/SRR1 come from
 * the lppaca via EXCEPTION_PROLOG_ISERIES_2 instead of the real SRRs.
 */
532*14cf11afSPaul Mackerras	.globl data_access_iSeries
533*14cf11afSPaul Mackerrasdata_access_iSeries:
534*14cf11afSPaul Mackerras	mtspr	SPRG1,r13
535*14cf11afSPaul MackerrasBEGIN_FTR_SECTION
536*14cf11afSPaul Mackerras	mtspr	SPRG2,r12
537*14cf11afSPaul Mackerras	mfspr	r13,DAR
538*14cf11afSPaul Mackerras	mfspr	r12,DSISR
539*14cf11afSPaul Mackerras	srdi	r13,r13,60
540*14cf11afSPaul Mackerras	rlwimi	r13,r12,16,0x20
541*14cf11afSPaul Mackerras	mfcr	r12
542*14cf11afSPaul Mackerras	cmpwi	r13,0x2c
543*14cf11afSPaul Mackerras	beq	.do_stab_bolted_iSeries
544*14cf11afSPaul Mackerras	mtcrf	0x80,r12
545*14cf11afSPaul Mackerras	mfspr	r12,SPRG2
546*14cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
547*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
548*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_2
549*14cf11afSPaul Mackerras	b	data_access_common
550*14cf11afSPaul Mackerras
/* Restore CR field 0 / r12 from the fast path above, then take the iSeries prolog (PACA_EXSLB area) into .do_stab_bolted. */
551*14cf11afSPaul Mackerras.do_stab_bolted_iSeries:
552*14cf11afSPaul Mackerras	mtcrf	0x80,r12
553*14cf11afSPaul Mackerras	mfspr	r12,SPRG2
554*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
555*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_2
556*14cf11afSPaul Mackerras	b	.do_stab_bolted
557*14cf11afSPaul Mackerras
/*
 * iSeries SLB misses: as on pSeries, r3 is saved in the EX_R3 slot of
 * PACA_EXSLB and the faulting address is passed to .do_slb_miss in r3,
 * with SRR1 in r12 — but both SRR values are read from the lppaca.
 */
558*14cf11afSPaul Mackerras	.globl	data_access_slb_iSeries
559*14cf11afSPaul Mackerrasdata_access_slb_iSeries:
560*14cf11afSPaul Mackerras	mtspr	SPRG1,r13		/* save r13 */
561*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
562*14cf11afSPaul Mackerras	std	r3,PACA_EXSLB+EX_R3(r13)
563*14cf11afSPaul Mackerras	ld	r12,PACALPPACA+LPPACASRR1(r13)
564*14cf11afSPaul Mackerras	mfspr	r3,DAR
565*14cf11afSPaul Mackerras	b	.do_slb_miss
566*14cf11afSPaul Mackerras
567*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
568*14cf11afSPaul Mackerras
/* Instruction-side variant: the faulting address is the saved SRR0 (fetch address) from the lppaca rather than DAR. */
569*14cf11afSPaul Mackerras	.globl	instruction_access_slb_iSeries
570*14cf11afSPaul Mackerrasinstruction_access_slb_iSeries:
571*14cf11afSPaul Mackerras	mtspr	SPRG1,r13		/* save r13 */
572*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
573*14cf11afSPaul Mackerras	std	r3,PACA_EXSLB+EX_R3(r13)
574*14cf11afSPaul Mackerras	ld	r12,PACALPPACA+LPPACASRR1(r13)
575*14cf11afSPaul Mackerras	ld	r3,PACALPPACA+LPPACASRR0(r13)
576*14cf11afSPaul Mackerras	b	.do_slb_miss
577*14cf11afSPaul Mackerras
578*14cf11afSPaul Mackerras	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
579*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
580*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
581*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
582*14cf11afSPaul Mackerras	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
583*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
584*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
585*14cf11afSPaul Mackerras
586*14cf11afSPaul Mackerras	.globl	system_call_iSeries
587*14cf11afSPaul Mackerrassystem_call_iSeries:
588*14cf11afSPaul Mackerras	mr	r9,r13
589*14cf11afSPaul Mackerras	mfspr	r13,SPRG3
590*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_ISERIES_2
591*14cf11afSPaul Mackerras	b	system_call_common
592*14cf11afSPaul Mackerras
593*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
594*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
595*14cf11afSPaul Mackerras	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
596*14cf11afSPaul Mackerras
597*14cf11afSPaul Mackerras	.globl system_reset_iSeries
598*14cf11afSPaul Mackerrassystem_reset_iSeries:
599*14cf11afSPaul Mackerras	mfspr	r13,SPRG3		/* Get paca address */
600*14cf11afSPaul Mackerras	mfmsr	r24
601*14cf11afSPaul Mackerras	ori	r24,r24,MSR_RI
602*14cf11afSPaul Mackerras	mtmsrd	r24			/* RI on */
603*14cf11afSPaul Mackerras	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
604*14cf11afSPaul Mackerras	cmpwi	0,r24,0			/* Are we processor 0? */
605*14cf11afSPaul Mackerras	beq	.__start_initialization_iSeries	/* Start up the first processor */
606*14cf11afSPaul Mackerras	mfspr	r4,SPRN_CTRLF
607*14cf11afSPaul Mackerras	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
608*14cf11afSPaul Mackerras	andc	r4,r4,r5
609*14cf11afSPaul Mackerras	mtspr	SPRN_CTRLT,r4
610*14cf11afSPaul Mackerras
611*14cf11afSPaul Mackerras1:
612*14cf11afSPaul Mackerras	HMT_LOW
613*14cf11afSPaul Mackerras#ifdef CONFIG_SMP
614*14cf11afSPaul Mackerras	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
615*14cf11afSPaul Mackerras					 * should start */
616*14cf11afSPaul Mackerras	sync
617*14cf11afSPaul Mackerras	LOADADDR(r3,current_set)
618*14cf11afSPaul Mackerras	sldi	r28,r24,3		/* get current_set[cpu#] */
619*14cf11afSPaul Mackerras	ldx	r3,r3,r28
620*14cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
621*14cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
622*14cf11afSPaul Mackerras
623*14cf11afSPaul Mackerras	cmpwi	0,r23,0
624*14cf11afSPaul Mackerras	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
625*14cf11afSPaul Mackerras	bne	.__secondary_start		/* Loop until told to go */
626*14cf11afSPaul MackerrasiSeries_secondary_smp_loop:
627*14cf11afSPaul Mackerras	/* Let the Hypervisor know we are alive */
628*14cf11afSPaul Mackerras	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
629*14cf11afSPaul Mackerras	lis	r3,0x8002
630*14cf11afSPaul Mackerras	rldicr	r3,r3,32,15		/* r0 = (r3 << 32) & 0xffff000000000000 */
631*14cf11afSPaul Mackerras#else /* CONFIG_SMP */
632*14cf11afSPaul Mackerras	/* Yield the processor.  This is required for non-SMP kernels
633*14cf11afSPaul Mackerras		which are running on multi-threaded machines. */
634*14cf11afSPaul Mackerras	lis	r3,0x8000
635*14cf11afSPaul Mackerras	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
636*14cf11afSPaul Mackerras	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
637*14cf11afSPaul Mackerras	li	r4,0			/* "yield timed" */
638*14cf11afSPaul Mackerras	li	r5,-1			/* "yield forever" */
639*14cf11afSPaul Mackerras#endif /* CONFIG_SMP */
640*14cf11afSPaul Mackerras	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
641*14cf11afSPaul Mackerras	sc				/* Invoke the hypervisor via a system call */
642*14cf11afSPaul Mackerras	mfspr	r13,SPRG3		/* Put r13 back ???? */
643*14cf11afSPaul Mackerras	b	1b			/* If SMP not configured, secondaries
644*14cf11afSPaul Mackerras					 * loop forever */
645*14cf11afSPaul Mackerras
646*14cf11afSPaul Mackerras	.globl decrementer_iSeries_masked
647*14cf11afSPaul Mackerrasdecrementer_iSeries_masked:
648*14cf11afSPaul Mackerras	li	r11,1
649*14cf11afSPaul Mackerras	stb	r11,PACALPPACA+LPPACADECRINT(r13)
650*14cf11afSPaul Mackerras	lwz	r12,PACADEFAULTDECR(r13)
651*14cf11afSPaul Mackerras	mtspr	SPRN_DEC,r12
652*14cf11afSPaul Mackerras	/* fall through */
653*14cf11afSPaul Mackerras
654*14cf11afSPaul Mackerras	.globl hardware_interrupt_iSeries_masked
655*14cf11afSPaul Mackerrashardware_interrupt_iSeries_masked:
656*14cf11afSPaul Mackerras	mtcrf	0x80,r9		/* Restore regs */
657*14cf11afSPaul Mackerras	ld	r11,PACALPPACA+LPPACASRR0(r13)
658*14cf11afSPaul Mackerras	ld	r12,PACALPPACA+LPPACASRR1(r13)
659*14cf11afSPaul Mackerras	mtspr	SRR0,r11
660*14cf11afSPaul Mackerras	mtspr	SRR1,r12
661*14cf11afSPaul Mackerras	ld	r9,PACA_EXGEN+EX_R9(r13)
662*14cf11afSPaul Mackerras	ld	r10,PACA_EXGEN+EX_R10(r13)
663*14cf11afSPaul Mackerras	ld	r11,PACA_EXGEN+EX_R11(r13)
664*14cf11afSPaul Mackerras	ld	r12,PACA_EXGEN+EX_R12(r13)
665*14cf11afSPaul Mackerras	ld	r13,PACA_EXGEN+EX_R13(r13)
666*14cf11afSPaul Mackerras	rfid
667*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
668*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
669*14cf11afSPaul Mackerras
670*14cf11afSPaul Mackerras/*** Common interrupt handlers ***/
671*14cf11afSPaul Mackerras
672*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
673*14cf11afSPaul Mackerras
674*14cf11afSPaul Mackerras	/*
675*14cf11afSPaul Mackerras	 * Machine check is different because we use a different
676*14cf11afSPaul Mackerras	 * save area: PACA_EXMC instead of PACA_EXGEN.
677*14cf11afSPaul Mackerras	 */
678*14cf11afSPaul Mackerras	.align	7
679*14cf11afSPaul Mackerras	.globl machine_check_common
680*14cf11afSPaul Mackerrasmachine_check_common:
681*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
682*14cf11afSPaul Mackerras	DISABLE_INTS
683*14cf11afSPaul Mackerras	bl	.save_nvgprs
684*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
685*14cf11afSPaul Mackerras	bl	.machine_check_exception
686*14cf11afSPaul Mackerras	b	.ret_from_except
687*14cf11afSPaul Mackerras
688*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
689*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
690*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
691*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
692*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
693*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
694*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
695*14cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
696*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
697*14cf11afSPaul Mackerras#else
698*14cf11afSPaul Mackerras	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
699*14cf11afSPaul Mackerras#endif
700*14cf11afSPaul Mackerras
701*14cf11afSPaul Mackerras/*
702*14cf11afSPaul Mackerras * Here we have detected that the kernel stack pointer is bad.
703*14cf11afSPaul Mackerras * R9 contains the saved CR, r13 points to the paca,
704*14cf11afSPaul Mackerras * r10 contains the (bad) kernel stack pointer,
705*14cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
706*14cf11afSPaul Mackerras * We switch to using an emergency stack, save the registers there,
707*14cf11afSPaul Mackerras * and call kernel_bad_stack(), which panics.
708*14cf11afSPaul Mackerras */
709*14cf11afSPaul Mackerrasbad_stack:
710*14cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
711*14cf11afSPaul Mackerras	subi	r1,r1,64+INT_FRAME_SIZE
712*14cf11afSPaul Mackerras	std	r9,_CCR(r1)
713*14cf11afSPaul Mackerras	std	r10,GPR1(r1)
714*14cf11afSPaul Mackerras	std	r11,_NIP(r1)
715*14cf11afSPaul Mackerras	std	r12,_MSR(r1)
716*14cf11afSPaul Mackerras	mfspr	r11,DAR
717*14cf11afSPaul Mackerras	mfspr	r12,DSISR
718*14cf11afSPaul Mackerras	std	r11,_DAR(r1)
719*14cf11afSPaul Mackerras	std	r12,_DSISR(r1)
720*14cf11afSPaul Mackerras	mflr	r10
721*14cf11afSPaul Mackerras	mfctr	r11
722*14cf11afSPaul Mackerras	mfxer	r12
723*14cf11afSPaul Mackerras	std	r10,_LINK(r1)
724*14cf11afSPaul Mackerras	std	r11,_CTR(r1)
725*14cf11afSPaul Mackerras	std	r12,_XER(r1)
726*14cf11afSPaul Mackerras	SAVE_GPR(0,r1)
727*14cf11afSPaul Mackerras	SAVE_GPR(2,r1)
728*14cf11afSPaul Mackerras	SAVE_4GPRS(3,r1)
729*14cf11afSPaul Mackerras	SAVE_2GPRS(7,r1)
730*14cf11afSPaul Mackerras	SAVE_10GPRS(12,r1)
731*14cf11afSPaul Mackerras	SAVE_10GPRS(22,r1)
732*14cf11afSPaul Mackerras	addi	r11,r1,INT_FRAME_SIZE
733*14cf11afSPaul Mackerras	std	r11,0(r1)
734*14cf11afSPaul Mackerras	li	r12,0
735*14cf11afSPaul Mackerras	std	r12,0(r11)
736*14cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
737*14cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
738*14cf11afSPaul Mackerras	bl	.kernel_bad_stack
739*14cf11afSPaul Mackerras	b	1b
740*14cf11afSPaul Mackerras
741*14cf11afSPaul Mackerras/*
742*14cf11afSPaul Mackerras * Return from an exception with minimal checks.
743*14cf11afSPaul Mackerras * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
744*14cf11afSPaul Mackerras * If interrupts have been enabled, or anything has been
745*14cf11afSPaul Mackerras * done that might have changed the scheduling status of
746*14cf11afSPaul Mackerras * any task or sent any task a signal, you should use
747*14cf11afSPaul Mackerras * ret_from_except or ret_from_except_lite instead of this.
748*14cf11afSPaul Mackerras */
749*14cf11afSPaul Mackerrasfast_exception_return:
750*14cf11afSPaul Mackerras	ld	r12,_MSR(r1)
751*14cf11afSPaul Mackerras	ld	r11,_NIP(r1)
752*14cf11afSPaul Mackerras	andi.	r3,r12,MSR_RI		/* check if RI is set */
753*14cf11afSPaul Mackerras	beq-	unrecov_fer
754*14cf11afSPaul Mackerras	ld	r3,_CCR(r1)
755*14cf11afSPaul Mackerras	ld	r4,_LINK(r1)
756*14cf11afSPaul Mackerras	ld	r5,_CTR(r1)
757*14cf11afSPaul Mackerras	ld	r6,_XER(r1)
758*14cf11afSPaul Mackerras	mtcr	r3
759*14cf11afSPaul Mackerras	mtlr	r4
760*14cf11afSPaul Mackerras	mtctr	r5
761*14cf11afSPaul Mackerras	mtxer	r6
762*14cf11afSPaul Mackerras	REST_GPR(0, r1)
763*14cf11afSPaul Mackerras	REST_8GPRS(2, r1)
764*14cf11afSPaul Mackerras
765*14cf11afSPaul Mackerras	mfmsr	r10
766*14cf11afSPaul Mackerras	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
767*14cf11afSPaul Mackerras	mtmsrd	r10,1
768*14cf11afSPaul Mackerras
769*14cf11afSPaul Mackerras	mtspr	SRR1,r12
770*14cf11afSPaul Mackerras	mtspr	SRR0,r11
771*14cf11afSPaul Mackerras	REST_4GPRS(10, r1)
772*14cf11afSPaul Mackerras	ld	r1,GPR1(r1)
773*14cf11afSPaul Mackerras	rfid
774*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
775*14cf11afSPaul Mackerras
776*14cf11afSPaul Mackerrasunrecov_fer:
777*14cf11afSPaul Mackerras	bl	.save_nvgprs
778*14cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
779*14cf11afSPaul Mackerras	bl	.unrecoverable_exception
780*14cf11afSPaul Mackerras	b	1b
781*14cf11afSPaul Mackerras
782*14cf11afSPaul Mackerras/*
783*14cf11afSPaul Mackerras * Here r13 points to the paca, r9 contains the saved CR,
784*14cf11afSPaul Mackerras * SRR0 and SRR1 are saved in r11 and r12,
785*14cf11afSPaul Mackerras * r9 - r13 are saved in paca->exgen.
786*14cf11afSPaul Mackerras */
787*14cf11afSPaul Mackerras	.align	7
788*14cf11afSPaul Mackerras	.globl data_access_common
789*14cf11afSPaul Mackerrasdata_access_common:
790*14cf11afSPaul Mackerras	RUNLATCH_ON(r10)		/* It wont fit in the 0x300 handler */
791*14cf11afSPaul Mackerras	mfspr	r10,DAR
792*14cf11afSPaul Mackerras	std	r10,PACA_EXGEN+EX_DAR(r13)
793*14cf11afSPaul Mackerras	mfspr	r10,DSISR
794*14cf11afSPaul Mackerras	stw	r10,PACA_EXGEN+EX_DSISR(r13)
795*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
796*14cf11afSPaul Mackerras	ld	r3,PACA_EXGEN+EX_DAR(r13)
797*14cf11afSPaul Mackerras	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
798*14cf11afSPaul Mackerras	li	r5,0x300
799*14cf11afSPaul Mackerras	b	.do_hash_page	 	/* Try to handle as hpte fault */
800*14cf11afSPaul Mackerras
801*14cf11afSPaul Mackerras	.align	7
802*14cf11afSPaul Mackerras	.globl instruction_access_common
803*14cf11afSPaul Mackerrasinstruction_access_common:
804*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
805*14cf11afSPaul Mackerras	ld	r3,_NIP(r1)
806*14cf11afSPaul Mackerras	andis.	r4,r12,0x5820
807*14cf11afSPaul Mackerras	li	r5,0x400
808*14cf11afSPaul Mackerras	b	.do_hash_page		/* Try to handle as hpte fault */
809*14cf11afSPaul Mackerras
810*14cf11afSPaul Mackerras	.align	7
811*14cf11afSPaul Mackerras	.globl hardware_interrupt_common
812*14cf11afSPaul Mackerras	.globl hardware_interrupt_entry
813*14cf11afSPaul Mackerrashardware_interrupt_common:
814*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
815*14cf11afSPaul Mackerrashardware_interrupt_entry:
816*14cf11afSPaul Mackerras	DISABLE_INTS
817*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
818*14cf11afSPaul Mackerras	bl	.do_IRQ
819*14cf11afSPaul Mackerras	b	.ret_from_except_lite
820*14cf11afSPaul Mackerras
821*14cf11afSPaul Mackerras	.align	7
822*14cf11afSPaul Mackerras	.globl alignment_common
823*14cf11afSPaul Mackerrasalignment_common:
824*14cf11afSPaul Mackerras	mfspr	r10,DAR
825*14cf11afSPaul Mackerras	std	r10,PACA_EXGEN+EX_DAR(r13)
826*14cf11afSPaul Mackerras	mfspr	r10,DSISR
827*14cf11afSPaul Mackerras	stw	r10,PACA_EXGEN+EX_DSISR(r13)
828*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
829*14cf11afSPaul Mackerras	ld	r3,PACA_EXGEN+EX_DAR(r13)
830*14cf11afSPaul Mackerras	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
831*14cf11afSPaul Mackerras	std	r3,_DAR(r1)
832*14cf11afSPaul Mackerras	std	r4,_DSISR(r1)
833*14cf11afSPaul Mackerras	bl	.save_nvgprs
834*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
835*14cf11afSPaul Mackerras	ENABLE_INTS
836*14cf11afSPaul Mackerras	bl	.alignment_exception
837*14cf11afSPaul Mackerras	b	.ret_from_except
838*14cf11afSPaul Mackerras
839*14cf11afSPaul Mackerras	.align	7
840*14cf11afSPaul Mackerras	.globl program_check_common
841*14cf11afSPaul Mackerrasprogram_check_common:
842*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
843*14cf11afSPaul Mackerras	bl	.save_nvgprs
844*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
845*14cf11afSPaul Mackerras	ENABLE_INTS
846*14cf11afSPaul Mackerras	bl	.program_check_exception
847*14cf11afSPaul Mackerras	b	.ret_from_except
848*14cf11afSPaul Mackerras
849*14cf11afSPaul Mackerras	.align	7
850*14cf11afSPaul Mackerras	.globl fp_unavailable_common
851*14cf11afSPaul Mackerrasfp_unavailable_common:
852*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
853*14cf11afSPaul Mackerras	bne	.load_up_fpu		/* if from user, just load it up */
854*14cf11afSPaul Mackerras	bl	.save_nvgprs
855*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
856*14cf11afSPaul Mackerras	ENABLE_INTS
857*14cf11afSPaul Mackerras	bl	.kernel_fp_unavailable_exception
858*14cf11afSPaul Mackerras	BUG_OPCODE
859*14cf11afSPaul Mackerras
860*14cf11afSPaul Mackerras/*
861*14cf11afSPaul Mackerras * load_up_fpu(unused, unused, tsk)
862*14cf11afSPaul Mackerras * Disable FP for the task which had the FPU previously,
863*14cf11afSPaul Mackerras * and save its floating-point registers in its thread_struct.
864*14cf11afSPaul Mackerras * Enables the FPU for use in the kernel on return.
865*14cf11afSPaul Mackerras * On SMP we know the fpu is free, since we give it up every
866*14cf11afSPaul Mackerras * switch (ie, no lazy save of the FP registers).
867*14cf11afSPaul Mackerras * On entry: r13 == 'current' && last_task_used_math != 'current'
868*14cf11afSPaul Mackerras */
869*14cf11afSPaul Mackerras_STATIC(load_up_fpu)
870*14cf11afSPaul Mackerras	mfmsr	r5			/* grab the current MSR */
871*14cf11afSPaul Mackerras	ori	r5,r5,MSR_FP
872*14cf11afSPaul Mackerras	mtmsrd	r5			/* enable use of fpu now */
873*14cf11afSPaul Mackerras	isync
874*14cf11afSPaul Mackerras/*
875*14cf11afSPaul Mackerras * For SMP, we don't do lazy FPU switching because it just gets too
876*14cf11afSPaul Mackerras * horrendously complex, especially when a task switches from one CPU
877*14cf11afSPaul Mackerras * to another.  Instead we call giveup_fpu in switch_to.
878*14cf11afSPaul Mackerras *
879*14cf11afSPaul Mackerras */
880*14cf11afSPaul Mackerras#ifndef CONFIG_SMP
881*14cf11afSPaul Mackerras	ld	r3,last_task_used_math@got(r2)
882*14cf11afSPaul Mackerras	ld	r4,0(r3)
883*14cf11afSPaul Mackerras	cmpdi	0,r4,0
884*14cf11afSPaul Mackerras	beq	1f
885*14cf11afSPaul Mackerras	/* Save FP state to last_task_used_math's THREAD struct */
886*14cf11afSPaul Mackerras	addi	r4,r4,THREAD
887*14cf11afSPaul Mackerras	SAVE_32FPRS(0, r4)
888*14cf11afSPaul Mackerras	mffs	fr0
889*14cf11afSPaul Mackerras	stfd	fr0,THREAD_FPSCR(r4)
890*14cf11afSPaul Mackerras	/* Disable FP for last_task_used_math */
891*14cf11afSPaul Mackerras	ld	r5,PT_REGS(r4)
892*14cf11afSPaul Mackerras	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893*14cf11afSPaul Mackerras	li	r6,MSR_FP|MSR_FE0|MSR_FE1
894*14cf11afSPaul Mackerras	andc	r4,r4,r6
895*14cf11afSPaul Mackerras	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
896*14cf11afSPaul Mackerras1:
897*14cf11afSPaul Mackerras#endif /* CONFIG_SMP */
898*14cf11afSPaul Mackerras	/* enable use of FP after return */
899*14cf11afSPaul Mackerras	ld	r4,PACACURRENT(r13)
900*14cf11afSPaul Mackerras	addi	r5,r4,THREAD		/* Get THREAD */
901*14cf11afSPaul Mackerras	ld	r4,THREAD_FPEXC_MODE(r5)
902*14cf11afSPaul Mackerras	ori	r12,r12,MSR_FP
903*14cf11afSPaul Mackerras	or	r12,r12,r4
904*14cf11afSPaul Mackerras	std	r12,_MSR(r1)
905*14cf11afSPaul Mackerras	lfd	fr0,THREAD_FPSCR(r5)
906*14cf11afSPaul Mackerras	mtfsf	0xff,fr0
907*14cf11afSPaul Mackerras	REST_32FPRS(0, r5)
908*14cf11afSPaul Mackerras#ifndef CONFIG_SMP
909*14cf11afSPaul Mackerras	/* Update last_task_used_math to 'current' */
910*14cf11afSPaul Mackerras	subi	r4,r5,THREAD		/* Back to 'current' */
911*14cf11afSPaul Mackerras	std	r4,0(r3)
912*14cf11afSPaul Mackerras#endif /* CONFIG_SMP */
913*14cf11afSPaul Mackerras	/* restore registers and return */
914*14cf11afSPaul Mackerras	b	fast_exception_return
915*14cf11afSPaul Mackerras
916*14cf11afSPaul Mackerras	.align	7
917*14cf11afSPaul Mackerras	.globl altivec_unavailable_common
918*14cf11afSPaul Mackerrasaltivec_unavailable_common:
919*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
920*14cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
921*14cf11afSPaul MackerrasBEGIN_FTR_SECTION
922*14cf11afSPaul Mackerras	bne	.load_up_altivec	/* if from user, just load it up */
923*14cf11afSPaul MackerrasEND_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
924*14cf11afSPaul Mackerras#endif
925*14cf11afSPaul Mackerras	bl	.save_nvgprs
926*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
927*14cf11afSPaul Mackerras	ENABLE_INTS
928*14cf11afSPaul Mackerras	bl	.altivec_unavailable_exception
929*14cf11afSPaul Mackerras	b	.ret_from_except
930*14cf11afSPaul Mackerras
931*14cf11afSPaul Mackerras#ifdef CONFIG_ALTIVEC
932*14cf11afSPaul Mackerras/*
933*14cf11afSPaul Mackerras * load_up_altivec(unused, unused, tsk)
934*14cf11afSPaul Mackerras * Disable VMX for the task which had it previously,
935*14cf11afSPaul Mackerras * and save its vector registers in its thread_struct.
936*14cf11afSPaul Mackerras * Enables the VMX for use in the kernel on return.
937*14cf11afSPaul Mackerras * On SMP we know the VMX is free, since we give it up every
938*14cf11afSPaul Mackerras * switch (ie, no lazy save of the vector registers).
939*14cf11afSPaul Mackerras * On entry: r13 == 'current' && last_task_used_altivec != 'current'
940*14cf11afSPaul Mackerras */
941*14cf11afSPaul Mackerras_STATIC(load_up_altivec)
942*14cf11afSPaul Mackerras	mfmsr	r5			/* grab the current MSR */
943*14cf11afSPaul Mackerras	oris	r5,r5,MSR_VEC@h
944*14cf11afSPaul Mackerras	mtmsrd	r5			/* enable use of VMX now */
945*14cf11afSPaul Mackerras	isync
946*14cf11afSPaul Mackerras
947*14cf11afSPaul Mackerras/*
948*14cf11afSPaul Mackerras * For SMP, we don't do lazy VMX switching because it just gets too
949*14cf11afSPaul Mackerras * horrendously complex, especially when a task switches from one CPU
950*14cf11afSPaul Mackerras * to another.  Instead we call giveup_altvec in switch_to.
951*14cf11afSPaul Mackerras * VRSAVE isn't dealt with here, that is done in the normal context
952*14cf11afSPaul Mackerras * switch code. Note that we could rely on vrsave value to eventually
953*14cf11afSPaul Mackerras * avoid saving all of the VREGs here...
954*14cf11afSPaul Mackerras */
955*14cf11afSPaul Mackerras#ifndef CONFIG_SMP
956*14cf11afSPaul Mackerras	ld	r3,last_task_used_altivec@got(r2)
957*14cf11afSPaul Mackerras	ld	r4,0(r3)
958*14cf11afSPaul Mackerras	cmpdi	0,r4,0
959*14cf11afSPaul Mackerras	beq	1f
960*14cf11afSPaul Mackerras	/* Save VMX state to last_task_used_altivec's THREAD struct */
961*14cf11afSPaul Mackerras	addi	r4,r4,THREAD
962*14cf11afSPaul Mackerras	SAVE_32VRS(0,r5,r4)
963*14cf11afSPaul Mackerras	mfvscr	vr0
964*14cf11afSPaul Mackerras	li	r10,THREAD_VSCR
965*14cf11afSPaul Mackerras	stvx	vr0,r10,r4
966*14cf11afSPaul Mackerras	/* Disable VMX for last_task_used_altivec */
967*14cf11afSPaul Mackerras	ld	r5,PT_REGS(r4)
968*14cf11afSPaul Mackerras	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
969*14cf11afSPaul Mackerras	lis	r6,MSR_VEC@h
970*14cf11afSPaul Mackerras	andc	r4,r4,r6
971*14cf11afSPaul Mackerras	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
972*14cf11afSPaul Mackerras1:
973*14cf11afSPaul Mackerras#endif /* CONFIG_SMP */
974*14cf11afSPaul Mackerras	/* Hack: if we get an altivec unavailable trap with VRSAVE
975*14cf11afSPaul Mackerras	 * set to all zeros, we assume this is a broken application
976*14cf11afSPaul Mackerras	 * that fails to set it properly, and thus we switch it to
977*14cf11afSPaul Mackerras	 * all 1's
978*14cf11afSPaul Mackerras	 */
979*14cf11afSPaul Mackerras	mfspr	r4,SPRN_VRSAVE
980*14cf11afSPaul Mackerras	cmpdi	0,r4,0
981*14cf11afSPaul Mackerras	bne+	1f
982*14cf11afSPaul Mackerras	li	r4,-1
983*14cf11afSPaul Mackerras	mtspr	SPRN_VRSAVE,r4
984*14cf11afSPaul Mackerras1:
985*14cf11afSPaul Mackerras	/* enable use of VMX after return */
986*14cf11afSPaul Mackerras	ld	r4,PACACURRENT(r13)
987*14cf11afSPaul Mackerras	addi	r5,r4,THREAD		/* Get THREAD */
988*14cf11afSPaul Mackerras	oris	r12,r12,MSR_VEC@h
989*14cf11afSPaul Mackerras	std	r12,_MSR(r1)
990*14cf11afSPaul Mackerras	li	r4,1
991*14cf11afSPaul Mackerras	li	r10,THREAD_VSCR
992*14cf11afSPaul Mackerras	stw	r4,THREAD_USED_VR(r5)
993*14cf11afSPaul Mackerras	lvx	vr0,r10,r5
994*14cf11afSPaul Mackerras	mtvscr	vr0
995*14cf11afSPaul Mackerras	REST_32VRS(0,r4,r5)
996*14cf11afSPaul Mackerras#ifndef CONFIG_SMP
997*14cf11afSPaul Mackerras	/* Update last_task_used_math to 'current' */
998*14cf11afSPaul Mackerras	subi	r4,r5,THREAD		/* Back to 'current' */
999*14cf11afSPaul Mackerras	std	r4,0(r3)
1000*14cf11afSPaul Mackerras#endif /* CONFIG_SMP */
1001*14cf11afSPaul Mackerras	/* restore registers and return */
1002*14cf11afSPaul Mackerras	b	fast_exception_return
1003*14cf11afSPaul Mackerras#endif /* CONFIG_ALTIVEC */
1004*14cf11afSPaul Mackerras
1005*14cf11afSPaul Mackerras/*
1006*14cf11afSPaul Mackerras * Hash table stuff
1007*14cf11afSPaul Mackerras */
1008*14cf11afSPaul Mackerras	.align	7
1009*14cf11afSPaul Mackerras_GLOBAL(do_hash_page)
1010*14cf11afSPaul Mackerras	std	r3,_DAR(r1)
1011*14cf11afSPaul Mackerras	std	r4,_DSISR(r1)
1012*14cf11afSPaul Mackerras
1013*14cf11afSPaul Mackerras	andis.	r0,r4,0xa450		/* weird error? */
1014*14cf11afSPaul Mackerras	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
1015*14cf11afSPaul MackerrasBEGIN_FTR_SECTION
1016*14cf11afSPaul Mackerras	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1017*14cf11afSPaul Mackerras	bne-	.do_ste_alloc		/* If so handle it */
1018*14cf11afSPaul MackerrasEND_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1019*14cf11afSPaul Mackerras
1020*14cf11afSPaul Mackerras	/*
1021*14cf11afSPaul Mackerras	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1022*14cf11afSPaul Mackerras	 * accessing a userspace segment (even from the kernel). We assume
1023*14cf11afSPaul Mackerras	 * kernel addresses always have the high bit set.
1024*14cf11afSPaul Mackerras	 */
1025*14cf11afSPaul Mackerras	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1026*14cf11afSPaul Mackerras	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1027*14cf11afSPaul Mackerras	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1028*14cf11afSPaul Mackerras	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1029*14cf11afSPaul Mackerras	ori	r4,r4,1			/* add _PAGE_PRESENT */
1030*14cf11afSPaul Mackerras	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
1031*14cf11afSPaul Mackerras
1032*14cf11afSPaul Mackerras	/*
1033*14cf11afSPaul Mackerras	 * On iSeries, we soft-disable interrupts here, then
1034*14cf11afSPaul Mackerras	 * hard-enable interrupts so that the hash_page code can spin on
1035*14cf11afSPaul Mackerras	 * the hash_table_lock without problems on a shared processor.
1036*14cf11afSPaul Mackerras	 */
1037*14cf11afSPaul Mackerras	DISABLE_INTS
1038*14cf11afSPaul Mackerras
1039*14cf11afSPaul Mackerras	/*
1040*14cf11afSPaul Mackerras	 * r3 contains the faulting address
1041*14cf11afSPaul Mackerras	 * r4 contains the required access permissions
1042*14cf11afSPaul Mackerras	 * r5 contains the trap number
1043*14cf11afSPaul Mackerras	 *
1044*14cf11afSPaul Mackerras	 * at return r3 = 0 for success
1045*14cf11afSPaul Mackerras	 */
1046*14cf11afSPaul Mackerras	bl	.hash_page		/* build HPTE if possible */
1047*14cf11afSPaul Mackerras	cmpdi	r3,0			/* see if hash_page succeeded */
1048*14cf11afSPaul Mackerras
1049*14cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
1050*14cf11afSPaul Mackerras	/*
1051*14cf11afSPaul Mackerras	 * If we had interrupts soft-enabled at the point where the
1052*14cf11afSPaul Mackerras	 * DSI/ISI occurred, and an interrupt came in during hash_page,
1053*14cf11afSPaul Mackerras	 * handle it now.
1054*14cf11afSPaul Mackerras	 * We jump to ret_from_except_lite rather than fast_exception_return
1055*14cf11afSPaul Mackerras	 * because ret_from_except_lite will check for and handle pending
1056*14cf11afSPaul Mackerras	 * interrupts if necessary.
1057*14cf11afSPaul Mackerras	 */
1058*14cf11afSPaul Mackerras	beq	.ret_from_except_lite
1059*14cf11afSPaul Mackerras	/* For a hash failure, we don't bother re-enabling interrupts */
1060*14cf11afSPaul Mackerras	ble-	12f
1061*14cf11afSPaul Mackerras
1062*14cf11afSPaul Mackerras	/*
1063*14cf11afSPaul Mackerras	 * hash_page couldn't handle it, set soft interrupt enable back
1064*14cf11afSPaul Mackerras	 * to what it was before the trap.  Note that .local_irq_restore
1065*14cf11afSPaul Mackerras	 * handles any interrupts pending at this point.
1066*14cf11afSPaul Mackerras	 */
1067*14cf11afSPaul Mackerras	ld	r3,SOFTE(r1)
1068*14cf11afSPaul Mackerras	bl	.local_irq_restore
1069*14cf11afSPaul Mackerras	b	11f
1070*14cf11afSPaul Mackerras#else
1071*14cf11afSPaul Mackerras	beq	fast_exception_return   /* Return from exception on success */
1072*14cf11afSPaul Mackerras	ble-	12f			/* Failure return from hash_page */
1073*14cf11afSPaul Mackerras
1074*14cf11afSPaul Mackerras	/* fall through */
1075*14cf11afSPaul Mackerras#endif
1076*14cf11afSPaul Mackerras
1077*14cf11afSPaul Mackerras/* Here we have a page fault that hash_page can't handle. */
1078*14cf11afSPaul Mackerras_GLOBAL(handle_page_fault)
1079*14cf11afSPaul Mackerras	ENABLE_INTS
1080*14cf11afSPaul Mackerras11:	ld	r4,_DAR(r1)
1081*14cf11afSPaul Mackerras	ld	r5,_DSISR(r1)
1082*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
1083*14cf11afSPaul Mackerras	bl	.do_page_fault
1084*14cf11afSPaul Mackerras	cmpdi	r3,0
1085*14cf11afSPaul Mackerras	beq+	.ret_from_except_lite
1086*14cf11afSPaul Mackerras	bl	.save_nvgprs
1087*14cf11afSPaul Mackerras	mr	r5,r3
1088*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
1089*14cf11afSPaul Mackerras	lwz	r4,_DAR(r1)
1090*14cf11afSPaul Mackerras	bl	.bad_page_fault
1091*14cf11afSPaul Mackerras	b	.ret_from_except
1092*14cf11afSPaul Mackerras
1093*14cf11afSPaul Mackerras/* We have a page fault that hash_page could handle but HV refused
1094*14cf11afSPaul Mackerras * the PTE insertion
1095*14cf11afSPaul Mackerras */
1096*14cf11afSPaul Mackerras12:	bl	.save_nvgprs
1097*14cf11afSPaul Mackerras	addi	r3,r1,STACK_FRAME_OVERHEAD
1098*14cf11afSPaul Mackerras	lwz	r4,_DAR(r1)
1099*14cf11afSPaul Mackerras	bl	.low_hash_fault
1100*14cf11afSPaul Mackerras	b	.ret_from_except
1101*14cf11afSPaul Mackerras
1102*14cf11afSPaul Mackerras	/* here we have a segment miss */
1103*14cf11afSPaul Mackerras_GLOBAL(do_ste_alloc)
1104*14cf11afSPaul Mackerras	bl	.ste_allocate		/* try to insert stab entry */
1105*14cf11afSPaul Mackerras	cmpdi	r3,0
1106*14cf11afSPaul Mackerras	beq+	fast_exception_return
1107*14cf11afSPaul Mackerras	b	.handle_page_fault
1108*14cf11afSPaul Mackerras
1109*14cf11afSPaul Mackerras/*
1110*14cf11afSPaul Mackerras * r13 points to the PACA, r9 contains the saved CR,
1111*14cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
1112*14cf11afSPaul Mackerras * r9 - r13 are saved in paca->exslb.
1113*14cf11afSPaul Mackerras * We assume we aren't going to take any exceptions during this procedure.
1114*14cf11afSPaul Mackerras * We assume (DAR >> 60) == 0xc.
1115*14cf11afSPaul Mackerras */
1116*14cf11afSPaul Mackerras	.align	7
1117*14cf11afSPaul Mackerras_GLOBAL(do_stab_bolted)
1118*14cf11afSPaul Mackerras	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1119*14cf11afSPaul Mackerras	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1120*14cf11afSPaul Mackerras
1121*14cf11afSPaul Mackerras	/* Hash to the primary group */
1122*14cf11afSPaul Mackerras	ld	r10,PACASTABVIRT(r13)
1123*14cf11afSPaul Mackerras	mfspr	r11,DAR
1124*14cf11afSPaul Mackerras	srdi	r11,r11,28
1125*14cf11afSPaul Mackerras	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1126*14cf11afSPaul Mackerras
1127*14cf11afSPaul Mackerras	/* Calculate VSID */
1128*14cf11afSPaul Mackerras	/* This is a kernel address, so protovsid = ESID */
1129*14cf11afSPaul Mackerras	ASM_VSID_SCRAMBLE(r11, r9)
1130*14cf11afSPaul Mackerras	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
1131*14cf11afSPaul Mackerras
1132*14cf11afSPaul Mackerras	/* Search the primary group for a free entry */
1133*14cf11afSPaul Mackerras1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1134*14cf11afSPaul Mackerras	andi.	r11,r11,0x80
1135*14cf11afSPaul Mackerras	beq	2f
1136*14cf11afSPaul Mackerras	addi	r10,r10,16
1137*14cf11afSPaul Mackerras	andi.	r11,r10,0x70
1138*14cf11afSPaul Mackerras	bne	1b
1139*14cf11afSPaul Mackerras
1140*14cf11afSPaul Mackerras	/* Stick for only searching the primary group for now.		*/
1141*14cf11afSPaul Mackerras	/* At least for now, we use a very simple random castout scheme */
1142*14cf11afSPaul Mackerras	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
1143*14cf11afSPaul Mackerras	mftb	r11
1144*14cf11afSPaul Mackerras	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1145*14cf11afSPaul Mackerras	ori	r11,r11,0x10
1146*14cf11afSPaul Mackerras
1147*14cf11afSPaul Mackerras	/* r10 currently points to an ste one past the group of interest */
1148*14cf11afSPaul Mackerras	/* make it point to the randomly selected entry			*/
1149*14cf11afSPaul Mackerras	subi	r10,r10,128
1150*14cf11afSPaul Mackerras	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1151*14cf11afSPaul Mackerras
1152*14cf11afSPaul Mackerras	isync			/* mark the entry invalid		*/
1153*14cf11afSPaul Mackerras	ld	r11,0(r10)
1154*14cf11afSPaul Mackerras	rldicl	r11,r11,56,1	/* clear the valid bit */
1155*14cf11afSPaul Mackerras	rotldi	r11,r11,8
1156*14cf11afSPaul Mackerras	std	r11,0(r10)
1157*14cf11afSPaul Mackerras	sync
1158*14cf11afSPaul Mackerras
1159*14cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1160*14cf11afSPaul Mackerras	slbie	r11
1161*14cf11afSPaul Mackerras
1162*14cf11afSPaul Mackerras2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1163*14cf11afSPaul Mackerras	eieio
1164*14cf11afSPaul Mackerras
1165*14cf11afSPaul Mackerras	mfspr	r11,DAR		/* Get the new esid			*/
1166*14cf11afSPaul Mackerras	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1167*14cf11afSPaul Mackerras	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1168*14cf11afSPaul Mackerras	std	r11,0(r10)	/* Put new entry back into the stab	*/
1169*14cf11afSPaul Mackerras
1170*14cf11afSPaul Mackerras	sync
1171*14cf11afSPaul Mackerras
1172*14cf11afSPaul Mackerras	/* All done -- return from exception. */
1173*14cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1174*14cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1175*14cf11afSPaul Mackerras
1176*14cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI
1177*14cf11afSPaul Mackerras	beq-	unrecov_slb
1178*14cf11afSPaul Mackerras
1179*14cf11afSPaul Mackerras	mtcrf	0x80,r9			/* restore CR */
1180*14cf11afSPaul Mackerras
1181*14cf11afSPaul Mackerras	mfmsr	r10
1182*14cf11afSPaul Mackerras	clrrdi	r10,r10,2
1183*14cf11afSPaul Mackerras	mtmsrd	r10,1
1184*14cf11afSPaul Mackerras
1185*14cf11afSPaul Mackerras	mtspr	SRR0,r11
1186*14cf11afSPaul Mackerras	mtspr	SRR1,r12
1187*14cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
1188*14cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
1189*14cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
1190*14cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
1191*14cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
1192*14cf11afSPaul Mackerras	rfid
1193*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
1194*14cf11afSPaul Mackerras
1195*14cf11afSPaul Mackerras/*
1196*14cf11afSPaul Mackerras * r13 points to the PACA, r9 contains the saved CR,
1197*14cf11afSPaul Mackerras * r11 and r12 contain the saved SRR0 and SRR1.
1198*14cf11afSPaul Mackerras * r3 has the faulting address
1199*14cf11afSPaul Mackerras * r9 - r13 are saved in paca->exslb.
1200*14cf11afSPaul Mackerras * r3 is saved in paca->slb_r3
1201*14cf11afSPaul Mackerras * We assume we aren't going to take any exceptions during this procedure.
1202*14cf11afSPaul Mackerras */
1203*14cf11afSPaul Mackerras_GLOBAL(do_slb_miss)
1204*14cf11afSPaul Mackerras	mflr	r10
1205*14cf11afSPaul Mackerras
1206*14cf11afSPaul Mackerras	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1207*14cf11afSPaul Mackerras	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1208*14cf11afSPaul Mackerras
1209*14cf11afSPaul Mackerras	bl	.slb_allocate			/* handle it */
1210*14cf11afSPaul Mackerras
1211*14cf11afSPaul Mackerras	/* All done -- return from exception. */
1212*14cf11afSPaul Mackerras
1213*14cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_LR(r13)
1214*14cf11afSPaul Mackerras	ld	r3,PACA_EXSLB+EX_R3(r13)
1215*14cf11afSPaul Mackerras	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1216*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1217*14cf11afSPaul Mackerras	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
1218*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
1219*14cf11afSPaul Mackerras
1220*14cf11afSPaul Mackerras	mtlr	r10
1221*14cf11afSPaul Mackerras
1222*14cf11afSPaul Mackerras	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1223*14cf11afSPaul Mackerras	beq-	unrecov_slb
1224*14cf11afSPaul Mackerras
1225*14cf11afSPaul Mackerras.machine	push
1226*14cf11afSPaul Mackerras.machine	"power4"
1227*14cf11afSPaul Mackerras	mtcrf	0x80,r9
1228*14cf11afSPaul Mackerras	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1229*14cf11afSPaul Mackerras.machine	pop
1230*14cf11afSPaul Mackerras
1231*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1232*14cf11afSPaul Mackerras	mtspr	SRR0,r11
1233*14cf11afSPaul Mackerras	mtspr	SRR1,r12
1234*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
1235*14cf11afSPaul Mackerras	ld	r9,PACA_EXSLB+EX_R9(r13)
1236*14cf11afSPaul Mackerras	ld	r10,PACA_EXSLB+EX_R10(r13)
1237*14cf11afSPaul Mackerras	ld	r11,PACA_EXSLB+EX_R11(r13)
1238*14cf11afSPaul Mackerras	ld	r12,PACA_EXSLB+EX_R12(r13)
1239*14cf11afSPaul Mackerras	ld	r13,PACA_EXSLB+EX_R13(r13)
1240*14cf11afSPaul Mackerras	rfid
1241*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
1242*14cf11afSPaul Mackerras
1243*14cf11afSPaul Mackerrasunrecov_slb:
1244*14cf11afSPaul Mackerras	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1245*14cf11afSPaul Mackerras	DISABLE_INTS
1246*14cf11afSPaul Mackerras	bl	.save_nvgprs
1247*14cf11afSPaul Mackerras1:	addi	r3,r1,STACK_FRAME_OVERHEAD
1248*14cf11afSPaul Mackerras	bl	.unrecoverable_exception
1249*14cf11afSPaul Mackerras	b	1b
1250*14cf11afSPaul Mackerras
1251*14cf11afSPaul Mackerras/*
1252*14cf11afSPaul Mackerras * Space for CPU0's segment table.
1253*14cf11afSPaul Mackerras *
1254*14cf11afSPaul Mackerras * On iSeries, the hypervisor must fill in at least one entry before
1255*14cf11afSPaul Mackerras * we get control (with relocate on).  The address is give to the hv
1256*14cf11afSPaul Mackerras * as a page number (see xLparMap in LparData.c), so this must be at a
1257*14cf11afSPaul Mackerras * fixed address (the linker can't compute (u64)&initial_stab >>
1258*14cf11afSPaul Mackerras * PAGE_SHIFT).
1259*14cf11afSPaul Mackerras */
1260*14cf11afSPaul Mackerras	. = STAB0_PHYS_ADDR	/* 0x6000 */
1261*14cf11afSPaul Mackerras	.globl initial_stab
1262*14cf11afSPaul Mackerrasinitial_stab:
1263*14cf11afSPaul Mackerras	.space	4096
1264*14cf11afSPaul Mackerras
1265*14cf11afSPaul Mackerras/*
1266*14cf11afSPaul Mackerras * Data area reserved for FWNMI option.
1267*14cf11afSPaul Mackerras * This address (0x7000) is fixed by the RPA.
1268*14cf11afSPaul Mackerras */
1269*14cf11afSPaul Mackerras	.= 0x7000
1270*14cf11afSPaul Mackerras	.globl fwnmi_data_area
1271*14cf11afSPaul Mackerrasfwnmi_data_area:
1272*14cf11afSPaul Mackerras
1273*14cf11afSPaul Mackerras	/* iSeries does not use the FWNMI stuff, so it is safe to put
1274*14cf11afSPaul Mackerras	 * this here, even if we later allow kernels that will boot on
1275*14cf11afSPaul Mackerras	 * both pSeries and iSeries */
1276*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1277*14cf11afSPaul Mackerras        . = LPARMAP_PHYS
1278*14cf11afSPaul Mackerras#include "lparmap.s"
1279*14cf11afSPaul Mackerras/*
1280*14cf11afSPaul Mackerras * This ".text" is here for old compilers that generate a trailing
1281*14cf11afSPaul Mackerras * .note section when compiling .c files to .s
1282*14cf11afSPaul Mackerras */
1283*14cf11afSPaul Mackerras	.text
1284*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
1285*14cf11afSPaul Mackerras
1286*14cf11afSPaul Mackerras        . = 0x8000
1287*14cf11afSPaul Mackerras
1288*14cf11afSPaul Mackerras/*
1289*14cf11afSPaul Mackerras * On pSeries, secondary processors spin in the following code.
1290*14cf11afSPaul Mackerras * At entry, r3 = this processor's number (physical cpu id)
1291*14cf11afSPaul Mackerras */
1292*14cf11afSPaul Mackerras_GLOBAL(pSeries_secondary_smp_init)
1293*14cf11afSPaul Mackerras	mr	r24,r3
1294*14cf11afSPaul Mackerras
1295*14cf11afSPaul Mackerras	/* turn on 64-bit mode */
1296*14cf11afSPaul Mackerras	bl	.enable_64b_mode
1297*14cf11afSPaul Mackerras	isync
1298*14cf11afSPaul Mackerras
1299*14cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
1300*14cf11afSPaul Mackerras	bl	.__restore_cpu_setup
1301*14cf11afSPaul Mackerras
1302*14cf11afSPaul Mackerras	/* Set up a paca value for this processor. Since we have the
1303*14cf11afSPaul Mackerras	 * physical cpu id in r24, we need to search the pacas to find
1304*14cf11afSPaul Mackerras	 * which logical id maps to our physical one.
1305*14cf11afSPaul Mackerras	 */
1306*14cf11afSPaul Mackerras	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
1307*14cf11afSPaul Mackerras	li	r5,0			/* logical cpu id                */
1308*14cf11afSPaul Mackerras1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
1309*14cf11afSPaul Mackerras	cmpw	r6,r24			/* Compare to our id             */
1310*14cf11afSPaul Mackerras	beq	2f
1311*14cf11afSPaul Mackerras	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
1312*14cf11afSPaul Mackerras	addi	r5,r5,1
1313*14cf11afSPaul Mackerras	cmpwi	r5,NR_CPUS
1314*14cf11afSPaul Mackerras	blt	1b
1315*14cf11afSPaul Mackerras
1316*14cf11afSPaul Mackerras	mr	r3,r24			/* not found, copy phys to r3	 */
1317*14cf11afSPaul Mackerras	b	.kexec_wait		/* next kernel might do better	 */
1318*14cf11afSPaul Mackerras
1319*14cf11afSPaul Mackerras2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
1320*14cf11afSPaul Mackerras	/* From now on, r24 is expected to be logical cpuid */
1321*14cf11afSPaul Mackerras	mr	r24,r5
1322*14cf11afSPaul Mackerras3:	HMT_LOW
1323*14cf11afSPaul Mackerras	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
1324*14cf11afSPaul Mackerras					/* start.			 */
1325*14cf11afSPaul Mackerras	sync
1326*14cf11afSPaul Mackerras
1327*14cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
1328*14cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
1329*14cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
1330*14cf11afSPaul Mackerras
1331*14cf11afSPaul Mackerras	cmpwi	0,r23,0
1332*14cf11afSPaul Mackerras#ifdef CONFIG_SMP
1333*14cf11afSPaul Mackerras	bne	.__secondary_start
1334*14cf11afSPaul Mackerras#endif
1335*14cf11afSPaul Mackerras	b 	3b			/* Loop until told to go	 */
1336*14cf11afSPaul Mackerras
1337*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1338*14cf11afSPaul Mackerras_STATIC(__start_initialization_iSeries)
1339*14cf11afSPaul Mackerras	/* Clear out the BSS */
1340*14cf11afSPaul Mackerras	LOADADDR(r11,__bss_stop)
1341*14cf11afSPaul Mackerras	LOADADDR(r8,__bss_start)
1342*14cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
1343*14cf11afSPaul Mackerras	addi	r11,r11,7		/* round up to an even double word */
1344*14cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
1345*14cf11afSPaul Mackerras	beq	4f
1346*14cf11afSPaul Mackerras	addi	r8,r8,-8
1347*14cf11afSPaul Mackerras	li	r0,0
1348*14cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
1349*14cf11afSPaul Mackerras3:	stdu	r0,8(r8)
1350*14cf11afSPaul Mackerras	bdnz	3b
1351*14cf11afSPaul Mackerras4:
1352*14cf11afSPaul Mackerras	LOADADDR(r1,init_thread_union)
1353*14cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE
1354*14cf11afSPaul Mackerras	li	r0,0
1355*14cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1356*14cf11afSPaul Mackerras
1357*14cf11afSPaul Mackerras	LOADADDR(r3,cpu_specs)
1358*14cf11afSPaul Mackerras	LOADADDR(r4,cur_cpu_spec)
1359*14cf11afSPaul Mackerras	li	r5,0
1360*14cf11afSPaul Mackerras	bl	.identify_cpu
1361*14cf11afSPaul Mackerras
1362*14cf11afSPaul Mackerras	LOADADDR(r2,__toc_start)
1363*14cf11afSPaul Mackerras	addi	r2,r2,0x4000
1364*14cf11afSPaul Mackerras	addi	r2,r2,0x4000
1365*14cf11afSPaul Mackerras
1366*14cf11afSPaul Mackerras	bl	.iSeries_early_setup
1367*14cf11afSPaul Mackerras
1368*14cf11afSPaul Mackerras	/* relocation is on at this point */
1369*14cf11afSPaul Mackerras
1370*14cf11afSPaul Mackerras	b	.start_here_common
1371*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_ISERIES */
1372*14cf11afSPaul Mackerras
1373*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_MULTIPLATFORM
1374*14cf11afSPaul Mackerras
1375*14cf11afSPaul Mackerras_STATIC(__mmu_off)
1376*14cf11afSPaul Mackerras	mfmsr	r3
1377*14cf11afSPaul Mackerras	andi.	r0,r3,MSR_IR|MSR_DR
1378*14cf11afSPaul Mackerras	beqlr
1379*14cf11afSPaul Mackerras	andc	r3,r3,r0
1380*14cf11afSPaul Mackerras	mtspr	SPRN_SRR0,r4
1381*14cf11afSPaul Mackerras	mtspr	SPRN_SRR1,r3
1382*14cf11afSPaul Mackerras	sync
1383*14cf11afSPaul Mackerras	rfid
1384*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
1385*14cf11afSPaul Mackerras
1386*14cf11afSPaul Mackerras
1387*14cf11afSPaul Mackerras/*
1388*14cf11afSPaul Mackerras * Here is our main kernel entry point. We support currently 2 kind of entries
1389*14cf11afSPaul Mackerras * depending on the value of r5.
1390*14cf11afSPaul Mackerras *
1391*14cf11afSPaul Mackerras *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1392*14cf11afSPaul Mackerras *                 in r3...r7
1393*14cf11afSPaul Mackerras *
1394*14cf11afSPaul Mackerras *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1395*14cf11afSPaul Mackerras *                 DT block, r4 is a physical pointer to the kernel itself
1396*14cf11afSPaul Mackerras *
1397*14cf11afSPaul Mackerras */
1398*14cf11afSPaul Mackerras_GLOBAL(__start_initialization_multiplatform)
1399*14cf11afSPaul Mackerras	/*
1400*14cf11afSPaul Mackerras	 * Are we booted from a PROM Of-type client-interface ?
1401*14cf11afSPaul Mackerras	 */
1402*14cf11afSPaul Mackerras	cmpldi	cr0,r5,0
1403*14cf11afSPaul Mackerras	bne	.__boot_from_prom		/* yes -> prom */
1404*14cf11afSPaul Mackerras
1405*14cf11afSPaul Mackerras	/* Save parameters */
1406*14cf11afSPaul Mackerras	mr	r31,r3
1407*14cf11afSPaul Mackerras	mr	r30,r4
1408*14cf11afSPaul Mackerras
1409*14cf11afSPaul Mackerras	/* Make sure we are running in 64 bits mode */
1410*14cf11afSPaul Mackerras	bl	.enable_64b_mode
1411*14cf11afSPaul Mackerras
1412*14cf11afSPaul Mackerras	/* Setup some critical 970 SPRs before switching MMU off */
1413*14cf11afSPaul Mackerras	bl	.__970_cpu_preinit
1414*14cf11afSPaul Mackerras
1415*14cf11afSPaul Mackerras	/* cpu # */
1416*14cf11afSPaul Mackerras	li	r24,0
1417*14cf11afSPaul Mackerras
1418*14cf11afSPaul Mackerras	/* Switch off MMU if not already */
1419*14cf11afSPaul Mackerras	LOADADDR(r4, .__after_prom_start - KERNELBASE)
1420*14cf11afSPaul Mackerras	add	r4,r4,r30
1421*14cf11afSPaul Mackerras	bl	.__mmu_off
1422*14cf11afSPaul Mackerras	b	.__after_prom_start
1423*14cf11afSPaul Mackerras
1424*14cf11afSPaul Mackerras_STATIC(__boot_from_prom)
1425*14cf11afSPaul Mackerras	/* Save parameters */
1426*14cf11afSPaul Mackerras	mr	r31,r3
1427*14cf11afSPaul Mackerras	mr	r30,r4
1428*14cf11afSPaul Mackerras	mr	r29,r5
1429*14cf11afSPaul Mackerras	mr	r28,r6
1430*14cf11afSPaul Mackerras	mr	r27,r7
1431*14cf11afSPaul Mackerras
1432*14cf11afSPaul Mackerras	/* Make sure we are running in 64 bits mode */
1433*14cf11afSPaul Mackerras	bl	.enable_64b_mode
1434*14cf11afSPaul Mackerras
1435*14cf11afSPaul Mackerras	/* put a relocation offset into r3 */
1436*14cf11afSPaul Mackerras	bl	.reloc_offset
1437*14cf11afSPaul Mackerras
1438*14cf11afSPaul Mackerras	LOADADDR(r2,__toc_start)
1439*14cf11afSPaul Mackerras	addi	r2,r2,0x4000
1440*14cf11afSPaul Mackerras	addi	r2,r2,0x4000
1441*14cf11afSPaul Mackerras
1442*14cf11afSPaul Mackerras	/* Relocate the TOC from a virt addr to a real addr */
1443*14cf11afSPaul Mackerras	sub	r2,r2,r3
1444*14cf11afSPaul Mackerras
1445*14cf11afSPaul Mackerras	/* Restore parameters */
1446*14cf11afSPaul Mackerras	mr	r3,r31
1447*14cf11afSPaul Mackerras	mr	r4,r30
1448*14cf11afSPaul Mackerras	mr	r5,r29
1449*14cf11afSPaul Mackerras	mr	r6,r28
1450*14cf11afSPaul Mackerras	mr	r7,r27
1451*14cf11afSPaul Mackerras
1452*14cf11afSPaul Mackerras	/* Do all of the interaction with OF client interface */
1453*14cf11afSPaul Mackerras	bl	.prom_init
1454*14cf11afSPaul Mackerras	/* We never return */
1455*14cf11afSPaul Mackerras	trap
1456*14cf11afSPaul Mackerras
1457*14cf11afSPaul Mackerras/*
1458*14cf11afSPaul Mackerras * At this point, r3 contains the physical address we are running at,
1459*14cf11afSPaul Mackerras * returned by prom_init()
1460*14cf11afSPaul Mackerras */
1461*14cf11afSPaul Mackerras_STATIC(__after_prom_start)
1462*14cf11afSPaul Mackerras
1463*14cf11afSPaul Mackerras/*
1464*14cf11afSPaul Mackerras * We need to run with __start at physical address 0.
1465*14cf11afSPaul Mackerras * This will leave some code in the first 256B of
1466*14cf11afSPaul Mackerras * real memory, which are reserved for software use.
1467*14cf11afSPaul Mackerras * The remainder of the first page is loaded with the fixed
1468*14cf11afSPaul Mackerras * interrupt vectors.  The next two pages are filled with
1469*14cf11afSPaul Mackerras * unknown exception placeholders.
1470*14cf11afSPaul Mackerras *
1471*14cf11afSPaul Mackerras * Note: This process overwrites the OF exception vectors.
1472*14cf11afSPaul Mackerras *	r26 == relocation offset
1473*14cf11afSPaul Mackerras *	r27 == KERNELBASE
1474*14cf11afSPaul Mackerras */
1475*14cf11afSPaul Mackerras	bl	.reloc_offset
1476*14cf11afSPaul Mackerras	mr	r26,r3
1477*14cf11afSPaul Mackerras	SET_REG_TO_CONST(r27,KERNELBASE)
1478*14cf11afSPaul Mackerras
1479*14cf11afSPaul Mackerras	li	r3,0			/* target addr */
1480*14cf11afSPaul Mackerras
1481*14cf11afSPaul Mackerras	// XXX FIXME: Use phys returned by OF (r30)
1482*14cf11afSPaul Mackerras	sub	r4,r27,r26 		/* source addr			 */
1483*14cf11afSPaul Mackerras					/* current address of _start	 */
1484*14cf11afSPaul Mackerras					/*   i.e. where we are running	 */
1485*14cf11afSPaul Mackerras					/*	the source addr		 */
1486*14cf11afSPaul Mackerras
1487*14cf11afSPaul Mackerras	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
1488*14cf11afSPaul Mackerras	sub	r5,r5,r27
1489*14cf11afSPaul Mackerras
1490*14cf11afSPaul Mackerras	li	r6,0x100		/* Start offset, the first 0x100 */
1491*14cf11afSPaul Mackerras					/* bytes were copied earlier.	 */
1492*14cf11afSPaul Mackerras
1493*14cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the first n bytes	 */
1494*14cf11afSPaul Mackerras					/* this includes the code being	 */
1495*14cf11afSPaul Mackerras					/* executed here.		 */
1496*14cf11afSPaul Mackerras
1497*14cf11afSPaul Mackerras	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
1498*14cf11afSPaul Mackerras	mtctr	r0			/* that we just made/relocated	 */
1499*14cf11afSPaul Mackerras	bctr
1500*14cf11afSPaul Mackerras
1501*14cf11afSPaul Mackerras4:	LOADADDR(r5,klimit)
1502*14cf11afSPaul Mackerras	sub	r5,r5,r26
1503*14cf11afSPaul Mackerras	ld	r5,0(r5)		/* get the value of klimit */
1504*14cf11afSPaul Mackerras	sub	r5,r5,r27
1505*14cf11afSPaul Mackerras	bl	.copy_and_flush		/* copy the rest */
1506*14cf11afSPaul Mackerras	b	.start_here_multiplatform
1507*14cf11afSPaul Mackerras
1508*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_MULTIPLATFORM */
1509*14cf11afSPaul Mackerras
1510*14cf11afSPaul Mackerras/*
1511*14cf11afSPaul Mackerras * Copy routine used to copy the kernel to start at physical address 0
1512*14cf11afSPaul Mackerras * and flush and invalidate the caches as needed.
1513*14cf11afSPaul Mackerras * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1514*14cf11afSPaul Mackerras * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1515*14cf11afSPaul Mackerras *
1516*14cf11afSPaul Mackerras * Note: this routine *only* clobbers r0, r6 and lr
1517*14cf11afSPaul Mackerras */
1518*14cf11afSPaul Mackerras_GLOBAL(copy_and_flush)
1519*14cf11afSPaul Mackerras	addi	r5,r5,-8
1520*14cf11afSPaul Mackerras	addi	r6,r6,-8
1521*14cf11afSPaul Mackerras4:	li	r0,16			/* Use the least common		*/
1522*14cf11afSPaul Mackerras					/* denominator cache line	*/
1523*14cf11afSPaul Mackerras					/* size.  This results in	*/
1524*14cf11afSPaul Mackerras					/* extra cache line flushes	*/
1525*14cf11afSPaul Mackerras					/* but operation is correct.	*/
1526*14cf11afSPaul Mackerras					/* Can't get cache line size	*/
1527*14cf11afSPaul Mackerras					/* from NACA as it is being	*/
1528*14cf11afSPaul Mackerras					/* moved too.			*/
1529*14cf11afSPaul Mackerras
1530*14cf11afSPaul Mackerras	mtctr	r0			/* put # words/line in ctr	*/
1531*14cf11afSPaul Mackerras3:	addi	r6,r6,8			/* copy a cache line		*/
1532*14cf11afSPaul Mackerras	ldx	r0,r6,r4
1533*14cf11afSPaul Mackerras	stdx	r0,r6,r3
1534*14cf11afSPaul Mackerras	bdnz	3b
1535*14cf11afSPaul Mackerras	dcbst	r6,r3			/* write it to memory		*/
1536*14cf11afSPaul Mackerras	sync
1537*14cf11afSPaul Mackerras	icbi	r6,r3			/* flush the icache line	*/
1538*14cf11afSPaul Mackerras	cmpld	0,r6,r5
1539*14cf11afSPaul Mackerras	blt	4b
1540*14cf11afSPaul Mackerras	sync
1541*14cf11afSPaul Mackerras	addi	r5,r5,8
1542*14cf11afSPaul Mackerras	addi	r6,r6,8
1543*14cf11afSPaul Mackerras	blr
1544*14cf11afSPaul Mackerras
1545*14cf11afSPaul Mackerras.align 8
1546*14cf11afSPaul Mackerrascopy_to_here:
1547*14cf11afSPaul Mackerras
1548*14cf11afSPaul Mackerras#ifdef CONFIG_SMP
1549*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_PMAC
1550*14cf11afSPaul Mackerras/*
1551*14cf11afSPaul Mackerras * On PowerMac, secondary processors starts from the reset vector, which
1552*14cf11afSPaul Mackerras * is temporarily turned into a call to one of the functions below.
1553*14cf11afSPaul Mackerras */
1554*14cf11afSPaul Mackerras	.section ".text";
1555*14cf11afSPaul Mackerras	.align 2 ;
1556*14cf11afSPaul Mackerras
1557*14cf11afSPaul Mackerras	.globl	pmac_secondary_start_1
1558*14cf11afSPaul Mackerraspmac_secondary_start_1:
1559*14cf11afSPaul Mackerras	li	r24, 1
1560*14cf11afSPaul Mackerras	b	.pmac_secondary_start
1561*14cf11afSPaul Mackerras
1562*14cf11afSPaul Mackerras	.globl pmac_secondary_start_2
1563*14cf11afSPaul Mackerraspmac_secondary_start_2:
1564*14cf11afSPaul Mackerras	li	r24, 2
1565*14cf11afSPaul Mackerras	b	.pmac_secondary_start
1566*14cf11afSPaul Mackerras
1567*14cf11afSPaul Mackerras	.globl pmac_secondary_start_3
1568*14cf11afSPaul Mackerraspmac_secondary_start_3:
1569*14cf11afSPaul Mackerras	li	r24, 3
1570*14cf11afSPaul Mackerras	b	.pmac_secondary_start
1571*14cf11afSPaul Mackerras
1572*14cf11afSPaul Mackerras_GLOBAL(pmac_secondary_start)
1573*14cf11afSPaul Mackerras	/* turn on 64-bit mode */
1574*14cf11afSPaul Mackerras	bl	.enable_64b_mode
1575*14cf11afSPaul Mackerras	isync
1576*14cf11afSPaul Mackerras
1577*14cf11afSPaul Mackerras	/* Copy some CPU settings from CPU 0 */
1578*14cf11afSPaul Mackerras	bl	.__restore_cpu_setup
1579*14cf11afSPaul Mackerras
1580*14cf11afSPaul Mackerras	/* pSeries do that early though I don't think we really need it */
1581*14cf11afSPaul Mackerras	mfmsr	r3
1582*14cf11afSPaul Mackerras	ori	r3,r3,MSR_RI
1583*14cf11afSPaul Mackerras	mtmsrd	r3			/* RI on */
1584*14cf11afSPaul Mackerras
1585*14cf11afSPaul Mackerras	/* Set up a paca value for this processor. */
1586*14cf11afSPaul Mackerras	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
1587*14cf11afSPaul Mackerras	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
1588*14cf11afSPaul Mackerras	add	r13,r13,r4		/* for this processor.		*/
1589*14cf11afSPaul Mackerras	mtspr	SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
1590*14cf11afSPaul Mackerras
1591*14cf11afSPaul Mackerras	/* Create a temp kernel stack for use before relocation is on.	*/
1592*14cf11afSPaul Mackerras	ld	r1,PACAEMERGSP(r13)
1593*14cf11afSPaul Mackerras	subi	r1,r1,STACK_FRAME_OVERHEAD
1594*14cf11afSPaul Mackerras
1595*14cf11afSPaul Mackerras	b	.__secondary_start
1596*14cf11afSPaul Mackerras
1597*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_PMAC */
1598*14cf11afSPaul Mackerras
1599*14cf11afSPaul Mackerras/*
1600*14cf11afSPaul Mackerras * This function is called after the master CPU has released the
1601*14cf11afSPaul Mackerras * secondary processors.  The execution environment is relocation off.
1602*14cf11afSPaul Mackerras * The paca for this processor has the following fields initialized at
1603*14cf11afSPaul Mackerras * this point:
1604*14cf11afSPaul Mackerras *   1. Processor number
1605*14cf11afSPaul Mackerras *   2. Segment table pointer (virtual address)
1606*14cf11afSPaul Mackerras * On entry the following are set:
1607*14cf11afSPaul Mackerras *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
1608*14cf11afSPaul Mackerras *   r24   = cpu# (in Linux terms)
1609*14cf11afSPaul Mackerras *   r13   = paca virtual address
1610*14cf11afSPaul Mackerras *   SPRG3 = paca virtual address
1611*14cf11afSPaul Mackerras */
1612*14cf11afSPaul Mackerras_GLOBAL(__secondary_start)
1613*14cf11afSPaul Mackerras
1614*14cf11afSPaul Mackerras	HMT_MEDIUM			/* Set thread priority to MEDIUM */
1615*14cf11afSPaul Mackerras
1616*14cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
1617*14cf11afSPaul Mackerras	li	r6,0
1618*14cf11afSPaul Mackerras	stb	r6,PACAPROCENABLED(r13)
1619*14cf11afSPaul Mackerras
1620*14cf11afSPaul Mackerras#ifndef CONFIG_PPC_ISERIES
1621*14cf11afSPaul Mackerras	/* Initialize the page table pointer register. */
1622*14cf11afSPaul Mackerras	LOADADDR(r6,_SDR1)
1623*14cf11afSPaul Mackerras	ld	r6,0(r6)		/* get the value of _SDR1	 */
1624*14cf11afSPaul Mackerras	mtspr	SDR1,r6			/* set the htab location	 */
1625*14cf11afSPaul Mackerras#endif
1626*14cf11afSPaul Mackerras	/* Initialize the first segment table (or SLB) entry		 */
1627*14cf11afSPaul Mackerras	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
1628*14cf11afSPaul Mackerras	bl	.stab_initialize
1629*14cf11afSPaul Mackerras
1630*14cf11afSPaul Mackerras	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1631*14cf11afSPaul Mackerras	LOADADDR(r3,current_set)
1632*14cf11afSPaul Mackerras	sldi	r28,r24,3		/* get current_set[cpu#]	 */
1633*14cf11afSPaul Mackerras	ldx	r1,r3,r28
1634*14cf11afSPaul Mackerras	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1635*14cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
1636*14cf11afSPaul Mackerras
1637*14cf11afSPaul Mackerras	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
1638*14cf11afSPaul Mackerras	ori	r4,r3,1			/* turn on valid bit		 */
1639*14cf11afSPaul Mackerras
1640*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_ISERIES
1641*14cf11afSPaul Mackerras	li	r0,-1			/* hypervisor call */
1642*14cf11afSPaul Mackerras	li	r3,1
1643*14cf11afSPaul Mackerras	sldi	r3,r3,63		/* 0x8000000000000000 */
1644*14cf11afSPaul Mackerras	ori	r3,r3,4			/* 0x8000000000000004 */
1645*14cf11afSPaul Mackerras	sc				/* HvCall_setASR */
1646*14cf11afSPaul Mackerras#else
1647*14cf11afSPaul Mackerras	/* set the ASR */
1648*14cf11afSPaul Mackerras	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
1649*14cf11afSPaul Mackerras	ld	r3,0(r3)
1650*14cf11afSPaul Mackerras	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
1651*14cf11afSPaul Mackerras	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
1652*14cf11afSPaul Mackerras	beq	98f			/* branch if result is 0  */
1653*14cf11afSPaul Mackerras	mfspr	r3,PVR
1654*14cf11afSPaul Mackerras	srwi	r3,r3,16
1655*14cf11afSPaul Mackerras	cmpwi	r3,0x37			/* SStar  */
1656*14cf11afSPaul Mackerras	beq	97f
1657*14cf11afSPaul Mackerras	cmpwi	r3,0x36			/* IStar  */
1658*14cf11afSPaul Mackerras	beq	97f
1659*14cf11afSPaul Mackerras	cmpwi	r3,0x34			/* Pulsar */
1660*14cf11afSPaul Mackerras	bne	98f
1661*14cf11afSPaul Mackerras97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
1662*14cf11afSPaul Mackerras	HVSC				/* Invoking hcall */
1663*14cf11afSPaul Mackerras	b	99f
1664*14cf11afSPaul Mackerras98:					/* !(rpa hypervisor) || !(star)  */
1665*14cf11afSPaul Mackerras	mtasr	r4			/* set the stab location	 */
1666*14cf11afSPaul Mackerras99:
1667*14cf11afSPaul Mackerras#endif
1668*14cf11afSPaul Mackerras	li	r7,0
1669*14cf11afSPaul Mackerras	mtlr	r7
1670*14cf11afSPaul Mackerras
1671*14cf11afSPaul Mackerras	/* enable MMU and jump to start_secondary */
1672*14cf11afSPaul Mackerras	LOADADDR(r3,.start_secondary_prolog)
1673*14cf11afSPaul Mackerras	SET_REG_TO_CONST(r4, MSR_KERNEL)
1674*14cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
1675*14cf11afSPaul Mackerras	ori	r4,r4,MSR_EE
1676*14cf11afSPaul Mackerras#endif
1677*14cf11afSPaul Mackerras	mtspr	SRR0,r3
1678*14cf11afSPaul Mackerras	mtspr	SRR1,r4
1679*14cf11afSPaul Mackerras	rfid
1680*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
1681*14cf11afSPaul Mackerras
1682*14cf11afSPaul Mackerras/*
1683*14cf11afSPaul Mackerras * Running with relocation on at this point.  All we want to do is
1684*14cf11afSPaul Mackerras * zero the stack back-chain pointer before going into C code.
1685*14cf11afSPaul Mackerras */
1686*14cf11afSPaul Mackerras_GLOBAL(start_secondary_prolog)
1687*14cf11afSPaul Mackerras	li	r3,0
1688*14cf11afSPaul Mackerras	std	r3,0(r1)		/* Zero the stack frame pointer	*/
1689*14cf11afSPaul Mackerras	bl	.start_secondary
1690*14cf11afSPaul Mackerras#endif
1691*14cf11afSPaul Mackerras
1692*14cf11afSPaul Mackerras/*
1693*14cf11afSPaul Mackerras * This subroutine clobbers r11 and r12
1694*14cf11afSPaul Mackerras */
1695*14cf11afSPaul Mackerras_GLOBAL(enable_64b_mode)
1696*14cf11afSPaul Mackerras	mfmsr	r11			/* grab the current MSR */
1697*14cf11afSPaul Mackerras	li	r12,1
1698*14cf11afSPaul Mackerras	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1699*14cf11afSPaul Mackerras	or	r11,r11,r12
1700*14cf11afSPaul Mackerras	li	r12,1
1701*14cf11afSPaul Mackerras	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1702*14cf11afSPaul Mackerras	or	r11,r11,r12
1703*14cf11afSPaul Mackerras	mtmsrd	r11
1704*14cf11afSPaul Mackerras	isync
1705*14cf11afSPaul Mackerras	blr
1706*14cf11afSPaul Mackerras
1707*14cf11afSPaul Mackerras#ifdef CONFIG_PPC_MULTIPLATFORM
1708*14cf11afSPaul Mackerras/*
1709*14cf11afSPaul Mackerras * This is where the main kernel code starts.
1710*14cf11afSPaul Mackerras */
1711*14cf11afSPaul Mackerras_STATIC(start_here_multiplatform)
1712*14cf11afSPaul Mackerras	/* get a new offset, now that the kernel has moved. */
1713*14cf11afSPaul Mackerras	bl	.reloc_offset
1714*14cf11afSPaul Mackerras	mr	r26,r3
1715*14cf11afSPaul Mackerras
1716*14cf11afSPaul Mackerras	/* Clear out the BSS. It may have been done in prom_init,
1717*14cf11afSPaul Mackerras	 * already but that's irrelevant since prom_init will soon
1718*14cf11afSPaul Mackerras	 * be detached from the kernel completely. Besides, we need
1719*14cf11afSPaul Mackerras	 * to clear it now for kexec-style entry.
1720*14cf11afSPaul Mackerras	 */
1721*14cf11afSPaul Mackerras	LOADADDR(r11,__bss_stop)
1722*14cf11afSPaul Mackerras	LOADADDR(r8,__bss_start)
1723*14cf11afSPaul Mackerras	sub	r11,r11,r8		/* bss size			*/
1724*14cf11afSPaul Mackerras	addi	r11,r11,7		/* round up to an even double word */
1725*14cf11afSPaul Mackerras	rldicl. r11,r11,61,3		/* shift right by 3		*/
1726*14cf11afSPaul Mackerras	beq	4f
1727*14cf11afSPaul Mackerras	addi	r8,r8,-8
1728*14cf11afSPaul Mackerras	li	r0,0
1729*14cf11afSPaul Mackerras	mtctr	r11			/* zero this many doublewords	*/
1730*14cf11afSPaul Mackerras3:	stdu	r0,8(r8)
1731*14cf11afSPaul Mackerras	bdnz	3b
1732*14cf11afSPaul Mackerras4:
1733*14cf11afSPaul Mackerras
1734*14cf11afSPaul Mackerras	mfmsr	r6
1735*14cf11afSPaul Mackerras	ori	r6,r6,MSR_RI
1736*14cf11afSPaul Mackerras	mtmsrd	r6			/* RI on */
1737*14cf11afSPaul Mackerras
1738*14cf11afSPaul Mackerras#ifdef CONFIG_HMT
1739*14cf11afSPaul Mackerras	/* Start up the second thread on cpu 0 */
1740*14cf11afSPaul Mackerras	mfspr	r3,PVR
1741*14cf11afSPaul Mackerras	srwi	r3,r3,16
1742*14cf11afSPaul Mackerras	cmpwi	r3,0x34			/* Pulsar  */
1743*14cf11afSPaul Mackerras	beq	90f
1744*14cf11afSPaul Mackerras	cmpwi	r3,0x36			/* Icestar */
1745*14cf11afSPaul Mackerras	beq	90f
1746*14cf11afSPaul Mackerras	cmpwi	r3,0x37			/* SStar   */
1747*14cf11afSPaul Mackerras	beq	90f
1748*14cf11afSPaul Mackerras	b	91f			/* HMT not supported */
1749*14cf11afSPaul Mackerras90:	li	r3,0
1750*14cf11afSPaul Mackerras	bl	.hmt_start_secondary
1751*14cf11afSPaul Mackerras91:
1752*14cf11afSPaul Mackerras#endif
1753*14cf11afSPaul Mackerras
1754*14cf11afSPaul Mackerras	/* The following gets the stack and TOC set up with the regs */
1755*14cf11afSPaul Mackerras	/* pointing to the real addr of the kernel stack.  This is   */
1756*14cf11afSPaul Mackerras	/* all done to support the C function call below which sets  */
1757*14cf11afSPaul Mackerras	/* up the htab.  This is done because we have relocated the  */
1758*14cf11afSPaul Mackerras	/* kernel but are still running in real mode. */
1759*14cf11afSPaul Mackerras
1760*14cf11afSPaul Mackerras	LOADADDR(r3,init_thread_union)
1761*14cf11afSPaul Mackerras	sub	r3,r3,r26
1762*14cf11afSPaul Mackerras
1763*14cf11afSPaul Mackerras	/* set up a stack pointer (physical address) */
1764*14cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
1765*14cf11afSPaul Mackerras	li	r0,0
1766*14cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1767*14cf11afSPaul Mackerras
1768*14cf11afSPaul Mackerras	/* set up the TOC (physical address) */
1769*14cf11afSPaul Mackerras	LOADADDR(r2,__toc_start)
1770*14cf11afSPaul Mackerras	addi	r2,r2,0x4000
1771*14cf11afSPaul Mackerras	addi	r2,r2,0x4000
1772*14cf11afSPaul Mackerras	sub	r2,r2,r26
1773*14cf11afSPaul Mackerras
1774*14cf11afSPaul Mackerras	LOADADDR(r3,cpu_specs)
1775*14cf11afSPaul Mackerras	sub	r3,r3,r26
1776*14cf11afSPaul Mackerras	LOADADDR(r4,cur_cpu_spec)
1777*14cf11afSPaul Mackerras	sub	r4,r4,r26
1778*14cf11afSPaul Mackerras	mr	r5,r26
1779*14cf11afSPaul Mackerras	bl	.identify_cpu
1780*14cf11afSPaul Mackerras
1781*14cf11afSPaul Mackerras	/* Save some low level config HIDs of CPU0 to be copied to
1782*14cf11afSPaul Mackerras	 * other CPUs later on, or used for suspend/resume
1783*14cf11afSPaul Mackerras	 */
1784*14cf11afSPaul Mackerras	bl	.__save_cpu_setup
1785*14cf11afSPaul Mackerras	sync
1786*14cf11afSPaul Mackerras
1787*14cf11afSPaul Mackerras	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
1788*14cf11afSPaul Mackerras	 * note that boot_cpuid can always be 0 nowadays since there is
1789*14cf11afSPaul Mackerras	 * nowhere it can be initialized differently before we reach this
1790*14cf11afSPaul Mackerras	 * code
1791*14cf11afSPaul Mackerras	 */
1792*14cf11afSPaul Mackerras	LOADADDR(r27, boot_cpuid)
1793*14cf11afSPaul Mackerras	sub	r27,r27,r26
1794*14cf11afSPaul Mackerras	lwz	r27,0(r27)
1795*14cf11afSPaul Mackerras
1796*14cf11afSPaul Mackerras	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
1797*14cf11afSPaul Mackerras	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
1798*14cf11afSPaul Mackerras	add	r13,r13,r24		/* for this processor.		 */
1799*14cf11afSPaul Mackerras	sub	r13,r13,r26		/* convert to physical addr	 */
1800*14cf11afSPaul Mackerras	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */
1801*14cf11afSPaul Mackerras
1802*14cf11afSPaul Mackerras	/* Do very early kernel initializations, including initial hash table,
1803*14cf11afSPaul Mackerras	 * stab and slb setup before we turn on relocation.	*/
1804*14cf11afSPaul Mackerras
1805*14cf11afSPaul Mackerras	/* Restore parameters passed from prom_init/kexec */
1806*14cf11afSPaul Mackerras	mr	r3,r31
1807*14cf11afSPaul Mackerras 	bl	.early_setup
1808*14cf11afSPaul Mackerras
1809*14cf11afSPaul Mackerras	/* set the ASR */
1810*14cf11afSPaul Mackerras	ld	r3,PACASTABREAL(r13)
1811*14cf11afSPaul Mackerras	ori	r4,r3,1			/* turn on valid bit		 */
1812*14cf11afSPaul Mackerras	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
1813*14cf11afSPaul Mackerras	ld	r3,0(r3)
1814*14cf11afSPaul Mackerras	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
1815*14cf11afSPaul Mackerras	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
1816*14cf11afSPaul Mackerras	beq	98f			/* branch if result is 0  */
1817*14cf11afSPaul Mackerras	mfspr	r3,PVR
1818*14cf11afSPaul Mackerras	srwi	r3,r3,16
1819*14cf11afSPaul Mackerras	cmpwi	r3,0x37			/* SStar */
1820*14cf11afSPaul Mackerras	beq	97f
1821*14cf11afSPaul Mackerras	cmpwi	r3,0x36			/* IStar  */
1822*14cf11afSPaul Mackerras	beq	97f
1823*14cf11afSPaul Mackerras	cmpwi	r3,0x34			/* Pulsar */
1824*14cf11afSPaul Mackerras	bne	98f
1825*14cf11afSPaul Mackerras97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
1826*14cf11afSPaul Mackerras	HVSC				/* Invoking hcall */
1827*14cf11afSPaul Mackerras	b	99f
1828*14cf11afSPaul Mackerras98:					/* !(rpa hypervisor) || !(star) */
1829*14cf11afSPaul Mackerras	mtasr	r4			/* set the stab location	*/
1830*14cf11afSPaul Mackerras99:
1831*14cf11afSPaul Mackerras	/* Set SDR1 (hash table pointer) */
1832*14cf11afSPaul Mackerras	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
1833*14cf11afSPaul Mackerras	ld	r3,0(r3)
1834*14cf11afSPaul Mackerras	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
1835*14cf11afSPaul Mackerras	/* Test if bit 0 is set (LPAR bit) */
1836*14cf11afSPaul Mackerras	andi.	r3,r3,PLATFORM_LPAR
1837*14cf11afSPaul Mackerras	bne	98f			/* branch if result is !0  */
1838*14cf11afSPaul Mackerras	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
1839*14cf11afSPaul Mackerras	sub	r6,r6,r26
1840*14cf11afSPaul Mackerras	ld	r6,0(r6)		/* get the value of _SDR1 */
1841*14cf11afSPaul Mackerras	mtspr	SDR1,r6			/* set the htab location  */
1842*14cf11afSPaul Mackerras98:
1843*14cf11afSPaul Mackerras	LOADADDR(r3,.start_here_common)
1844*14cf11afSPaul Mackerras	SET_REG_TO_CONST(r4, MSR_KERNEL)
1845*14cf11afSPaul Mackerras	mtspr	SRR0,r3
1846*14cf11afSPaul Mackerras	mtspr	SRR1,r4
1847*14cf11afSPaul Mackerras	rfid
1848*14cf11afSPaul Mackerras	b	.	/* prevent speculative execution */
1849*14cf11afSPaul Mackerras#endif /* CONFIG_PPC_MULTIPLATFORM */
1850*14cf11afSPaul Mackerras
1851*14cf11afSPaul Mackerras	/* This is where all platforms converge execution */
1852*14cf11afSPaul Mackerras_STATIC(start_here_common)
1853*14cf11afSPaul Mackerras	/* relocation is on at this point */
1854*14cf11afSPaul Mackerras
1855*14cf11afSPaul Mackerras	/* The following code sets up the SP and TOC now that we are */
1856*14cf11afSPaul Mackerras	/* running with translation enabled. */
1857*14cf11afSPaul Mackerras
1858*14cf11afSPaul Mackerras	LOADADDR(r3,init_thread_union)
1859*14cf11afSPaul Mackerras
1860*14cf11afSPaul Mackerras	/* set up the stack */
1861*14cf11afSPaul Mackerras	addi	r1,r3,THREAD_SIZE
1862*14cf11afSPaul Mackerras	li	r0,0
1863*14cf11afSPaul Mackerras	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1864*14cf11afSPaul Mackerras
1865*14cf11afSPaul Mackerras	/* Apply the CPUs-specific fixups (nop out sections not relevant
1866*14cf11afSPaul Mackerras	 * to this CPU
1867*14cf11afSPaul Mackerras	 */
1868*14cf11afSPaul Mackerras	li	r3,0
1869*14cf11afSPaul Mackerras	bl	.do_cpu_ftr_fixups
1870*14cf11afSPaul Mackerras
1871*14cf11afSPaul Mackerras	LOADADDR(r26, boot_cpuid)
1872*14cf11afSPaul Mackerras	lwz	r26,0(r26)
1873*14cf11afSPaul Mackerras
1874*14cf11afSPaul Mackerras	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
1875*14cf11afSPaul Mackerras	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
1876*14cf11afSPaul Mackerras	add	r13,r13,r24		/* for this processor.		 */
1877*14cf11afSPaul Mackerras	mtspr	SPRG3,r13
1878*14cf11afSPaul Mackerras
1879*14cf11afSPaul Mackerras	/* ptr to current */
1880*14cf11afSPaul Mackerras	LOADADDR(r4,init_task)
1881*14cf11afSPaul Mackerras	std	r4,PACACURRENT(r13)
1882*14cf11afSPaul Mackerras
1883*14cf11afSPaul Mackerras	/* Load the TOC */
1884*14cf11afSPaul Mackerras	ld	r2,PACATOC(r13)
1885*14cf11afSPaul Mackerras	std	r1,PACAKSAVE(r13)
1886*14cf11afSPaul Mackerras
1887*14cf11afSPaul Mackerras	bl	.setup_system
1888*14cf11afSPaul Mackerras
1889*14cf11afSPaul Mackerras	/* Load up the kernel context */
1890*14cf11afSPaul Mackerras5:
1891*14cf11afSPaul Mackerras#ifdef DO_SOFT_DISABLE
1892*14cf11afSPaul Mackerras	li	r5,0
1893*14cf11afSPaul Mackerras	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
1894*14cf11afSPaul Mackerras	mfmsr	r5
1895*14cf11afSPaul Mackerras	ori	r5,r5,MSR_EE		/* Hard Enabled */
1896*14cf11afSPaul Mackerras	mtmsrd	r5
1897*14cf11afSPaul Mackerras#endif
1898*14cf11afSPaul Mackerras
1899*14cf11afSPaul Mackerras	bl .start_kernel
1900*14cf11afSPaul Mackerras
1901*14cf11afSPaul Mackerras_GLOBAL(hmt_init)
1902*14cf11afSPaul Mackerras#ifdef CONFIG_HMT
1903*14cf11afSPaul Mackerras	LOADADDR(r5, hmt_thread_data)
1904*14cf11afSPaul Mackerras	mfspr	r7,PVR
1905*14cf11afSPaul Mackerras	srwi	r7,r7,16
1906*14cf11afSPaul Mackerras	cmpwi	r7,0x34			/* Pulsar  */
1907*14cf11afSPaul Mackerras	beq	90f
1908*14cf11afSPaul Mackerras	cmpwi	r7,0x36			/* Icestar */
1909*14cf11afSPaul Mackerras	beq	91f
1910*14cf11afSPaul Mackerras	cmpwi	r7,0x37			/* SStar   */
1911*14cf11afSPaul Mackerras	beq	91f
1912*14cf11afSPaul Mackerras	b	101f
1913*14cf11afSPaul Mackerras90:	mfspr	r6,PIR
1914*14cf11afSPaul Mackerras	andi.	r6,r6,0x1f
1915*14cf11afSPaul Mackerras	b	92f
1916*14cf11afSPaul Mackerras91:	mfspr	r6,PIR
1917*14cf11afSPaul Mackerras	andi.	r6,r6,0x3ff
1918*14cf11afSPaul Mackerras92:	sldi	r4,r24,3
1919*14cf11afSPaul Mackerras	stwx	r6,r5,r4
1920*14cf11afSPaul Mackerras	bl	.hmt_start_secondary
1921*14cf11afSPaul Mackerras	b	101f
1922*14cf11afSPaul Mackerras
1923*14cf11afSPaul Mackerras__hmt_secondary_hold:
1924*14cf11afSPaul Mackerras	LOADADDR(r5, hmt_thread_data)
1925*14cf11afSPaul Mackerras	clrldi	r5,r5,4
1926*14cf11afSPaul Mackerras	li	r7,0
1927*14cf11afSPaul Mackerras	mfspr	r6,PIR
1928*14cf11afSPaul Mackerras	mfspr	r8,PVR
1929*14cf11afSPaul Mackerras	srwi	r8,r8,16
1930*14cf11afSPaul Mackerras	cmpwi	r8,0x34
1931*14cf11afSPaul Mackerras	bne	93f
1932*14cf11afSPaul Mackerras	andi.	r6,r6,0x1f
1933*14cf11afSPaul Mackerras	b	103f
1934*14cf11afSPaul Mackerras93:	andi.	r6,r6,0x3f
1935*14cf11afSPaul Mackerras
1936*14cf11afSPaul Mackerras103:	lwzx	r8,r5,r7
1937*14cf11afSPaul Mackerras	cmpw	r8,r6
1938*14cf11afSPaul Mackerras	beq	104f
1939*14cf11afSPaul Mackerras	addi	r7,r7,8
1940*14cf11afSPaul Mackerras	b	103b
1941*14cf11afSPaul Mackerras
1942*14cf11afSPaul Mackerras104:	addi	r7,r7,4
1943*14cf11afSPaul Mackerras	lwzx	r9,r5,r7
1944*14cf11afSPaul Mackerras	mr	r24,r9
1945*14cf11afSPaul Mackerras101:
1946*14cf11afSPaul Mackerras#endif
1947*14cf11afSPaul Mackerras	mr	r3,r24
1948*14cf11afSPaul Mackerras	b	.pSeries_secondary_smp_init
1949*14cf11afSPaul Mackerras
1950*14cf11afSPaul Mackerras#ifdef CONFIG_HMT
1951*14cf11afSPaul Mackerras_GLOBAL(hmt_start_secondary)
1952*14cf11afSPaul Mackerras	LOADADDR(r4,__hmt_secondary_hold)
1953*14cf11afSPaul Mackerras	clrldi	r4,r4,4
1954*14cf11afSPaul Mackerras	mtspr	NIADORM, r4
1955*14cf11afSPaul Mackerras	mfspr	r4, MSRDORM
1956*14cf11afSPaul Mackerras	li	r5, -65
1957*14cf11afSPaul Mackerras	and	r4, r4, r5
1958*14cf11afSPaul Mackerras	mtspr	MSRDORM, r4
1959*14cf11afSPaul Mackerras	lis	r4,0xffef
1960*14cf11afSPaul Mackerras	ori	r4,r4,0x7403
1961*14cf11afSPaul Mackerras	mtspr	TSC, r4
1962*14cf11afSPaul Mackerras	li	r4,0x1f4
1963*14cf11afSPaul Mackerras	mtspr	TST, r4
1964*14cf11afSPaul Mackerras	mfspr	r4, HID0
1965*14cf11afSPaul Mackerras	ori	r4, r4, 0x1
1966*14cf11afSPaul Mackerras	mtspr	HID0, r4
1967*14cf11afSPaul Mackerras	mfspr	r4, SPRN_CTRLF
1968*14cf11afSPaul Mackerras	oris	r4, r4, 0x40
1969*14cf11afSPaul Mackerras	mtspr	SPRN_CTRLT, r4
1970*14cf11afSPaul Mackerras	blr
1971*14cf11afSPaul Mackerras#endif
1972*14cf11afSPaul Mackerras
1973*14cf11afSPaul Mackerras#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
1974*14cf11afSPaul Mackerras_GLOBAL(smp_release_cpus)
1975*14cf11afSPaul Mackerras	/* All secondary cpus are spinning on a common
1976*14cf11afSPaul Mackerras	 * spinloop, release them all now so they can start
1977*14cf11afSPaul Mackerras	 * to spin on their individual paca spinloops.
1978*14cf11afSPaul Mackerras	 * For non SMP kernels, the secondary cpus never
1979*14cf11afSPaul Mackerras	 * get out of the common spinloop.
1980*14cf11afSPaul Mackerras	 */
1981*14cf11afSPaul Mackerras	li	r3,1
1982*14cf11afSPaul Mackerras	LOADADDR(r5,__secondary_hold_spinloop)
1983*14cf11afSPaul Mackerras	std	r3,0(r5)
1984*14cf11afSPaul Mackerras	sync
1985*14cf11afSPaul Mackerras	blr
1986*14cf11afSPaul Mackerras#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
1987*14cf11afSPaul Mackerras
1988*14cf11afSPaul Mackerras
1989*14cf11afSPaul Mackerras/*
1990*14cf11afSPaul Mackerras * We put a few things here that have to be page-aligned.
1991*14cf11afSPaul Mackerras * This stuff goes at the beginning of the bss, which is page-aligned.
1992*14cf11afSPaul Mackerras */
1993*14cf11afSPaul Mackerras	.section ".bss"
1994*14cf11afSPaul Mackerras
1995*14cf11afSPaul Mackerras	.align	PAGE_SHIFT
1996*14cf11afSPaul Mackerras
1997*14cf11afSPaul Mackerras	.globl	empty_zero_page
1998*14cf11afSPaul Mackerrasempty_zero_page:
1999*14cf11afSPaul Mackerras	.space	PAGE_SIZE
2000*14cf11afSPaul Mackerras
2001*14cf11afSPaul Mackerras	.globl	swapper_pg_dir
2002*14cf11afSPaul Mackerrasswapper_pg_dir:
2003*14cf11afSPaul Mackerras	.space	PAGE_SIZE
2004*14cf11afSPaul Mackerras
2005*14cf11afSPaul Mackerras/*
2006*14cf11afSPaul Mackerras * This space gets a copy of optional info passed to us by the bootstrap
2007*14cf11afSPaul Mackerras * Used to pass parameters into the kernel like root=/dev/sda1, etc.
2008*14cf11afSPaul Mackerras */
2009*14cf11afSPaul Mackerras	.globl	cmd_line
2010*14cf11afSPaul Mackerrascmd_line:
2011*14cf11afSPaul Mackerras	.space	COMMAND_LINE_SIZE
2012