/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HEAD_32_H__
#define __HEAD_32_H__

#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume SPRN_SPRG_THREAD has the physical address of the current
 * task's thread_struct.
 */
.macro EXCEPTION_PROLOG handle_dar_dsisr=0
	EXCEPTION_PROLOG_0	handle_dar_dsisr=\handle_dar_dsisr
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2	handle_dar_dsisr=\handle_dar_dsisr
.endm
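
/*
 * Illustrative use (a sketch, not part of this header): most vectors
 * run the whole prolog through the EXCEPTION() helper defined below,
 * while a handler that needs the fault address, e.g. a data storage
 * exception, would open with:
 *
 *	EXCEPTION_PROLOG handle_dar_dsisr=1
 */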

.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
#ifdef CONFIG_VMAP_STACK
	mfspr	r10, SPRN_SPRG_THREAD
	.if	\handle_dar_dsisr
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	.endif
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
#endif
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
#ifdef CONFIG_VMAP_STACK
	stw	r11, SRR1(r10)
#endif
	mfcr	r10
	andi.	r11, r11, MSR_PR
.endm
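
/*
 * EXCEPTION_PROLOG_0 exits with the saved CR image in r10 and only the
 * MSR_PR bit of SRR1 in r11, with cr0.eq set when the exception came
 * from kernel mode; EXCEPTION_PROLOG_1 below relies on that cr0 state.
 */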

.macro EXCEPTION_PROLOG_1 for_rtas=0
#ifdef CONFIG_VMAP_STACK
	.ifeq	\for_rtas
	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r11
	isync
	.endif
	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
#else
	tophys(r11,r1)			/* use tophys(r1) if kernel */
	subi	r11, r11, INT_FRAME_SIZE	/* alloc exc. frame */
#endif
	beq	1f
	mfspr	r11,SPRN_SPRG_THREAD
	tovirt_vmstack r11, r11
	lwz	r11,TASK_STACK-THREAD(r11)
	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
	tophys_novmstack r11, r11
1:
#ifdef CONFIG_VMAP_STACK
	mtcrf	0x7f, r11
	bt	32 - THREAD_ALIGN_SHIFT, stack_overflow
#endif
.endm
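
/*
 * On exit from EXCEPTION_PROLOG_1, r11 addresses the exception frame:
 * carved out of the current stack (r1) when the trap came from kernel
 * mode, or out of the top of the task's kernel stack when it came from
 * user mode.  Without CONFIG_VMAP_STACK this is a physical address.
 */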

.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
BEGIN_MMU_FTR_SECTION
	mtcr	r10
FTR_SECTION_ELSE
	stw	r10, _CCR(r11)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#else
	stw	r10,_CCR(r11)		/* save registers */
#endif
	mfspr	r10, SPRN_SPRG_SCRATCH0
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	stw	r10,GPR10(r11)
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
BEGIN_MMU_FTR_SECTION
	mfcr	r10
	stw	r10, _CCR(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
	mfspr	r12,SPRN_SPRG_SCRATCH1
	stw	r12,GPR11(r11)
	mflr	r10
	stw	r10,_LINK(r11)
#ifdef CONFIG_VMAP_STACK
	mfspr	r12, SPRN_SPRG_THREAD
	tovirt(r12, r12)
	.if	\handle_dar_dsisr
	lwz	r10, DAR(r12)
	stw	r10, _DAR(r11)
	lwz	r10, DSISR(r12)
	stw	r10, _DSISR(r11)
	.endif
	lwz	r9, SRR1(r12)
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
BEGIN_MMU_FTR_SECTION
	andi.	r10, r9, MSR_PR
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
	lwz	r12, SRR0(r12)
#else
	mfspr	r12,SPRN_SRR0
	mfspr	r9,SPRN_SRR1
#endif
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt_novmstack r1, r11	/* set new kernel sp */
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
#ifdef CONFIG_VMAP_STACK
	li	r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
#else
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
#endif
	mtmsr	r10			/* (except for mach check in rtas) */
#endif
	stw	r0,GPR0(r11)
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r10,8(r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
.endm
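
/*
 * After EXCEPTION_PROLOG_2, r11 points at the exception frame and r1 is
 * the new kernel stack pointer; r12 holds SRR0, r9 holds SRR1, and r0,
 * r1, r3-r12, LR, CR and (if requested) DAR/DSISR have been saved in
 * the frame.  See also the notes below the stack helper macros.
 */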

.macro SYSCALL_ENTRY trapno
	mfspr	r12,SPRN_SPRG_THREAD
	mfspr	r9, SPRN_SRR1
#ifdef CONFIG_VMAP_STACK
	mfspr	r11, SPRN_SRR0
	mtctr	r11
#endif
	andi.	r11, r9, MSR_PR
	lwz	r11,TASK_STACK-THREAD(r12)
	beq-	99f
	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	li	r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r10
	isync
#endif
	tovirt_vmstack r12, r12
	tophys_novmstack r11, r11
	mflr	r10
	stw	r10, _LINK(r11)
#ifdef CONFIG_VMAP_STACK
	mfctr	r10
#else
	mfspr	r10,SPRN_SRR0
#endif
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt_novmstack r1, r11	/* set new kernel sp */
	stw	r10,_NIP(r11)
	mfcr	r10
	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
	stw	r10,_CCR(r11)		/* save registers */
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
#ifdef CONFIG_VMAP_STACK
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
#endif
	mtmsr	r10			/* (except for mach check in rtas) */
#endif
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	stw	r2,GPR2(r11)
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r11)
	li	r2, \trapno + 1
	stw	r10,8(r11)
	stw	r2,_TRAP(r11)
	SAVE_GPR(0, r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
	addi	r11,r1,STACK_FRAME_OVERHEAD
	addi	r2,r12,-THREAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#if defined(CONFIG_40x)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

3:
	tovirt_novmstack r2, r2	/* set r2 to current */
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If MSR is changing we need to keep interrupts disabled at this point
	 * otherwise we might risk taking an interrupt before we tell lockdep
	 * they are enabled.
	 */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
	rlwimi	r10, r9, 0, MSR_EE
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	SYNC
	RFI				/* jump to handler, enable MMU */
99:	b	ret_from_kernel_syscall
.endm
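
/*
 * Illustrative use (a sketch): the system call vector invokes this
 * macro with the vector address as trap number, e.g.
 *
 *	SYSCALL_ENTRY	0xc00
 *
 * The value stored in _TRAP is \trapno + 1; the low bit set flags a
 * frame on which the non-volatile GPRs have not been saved.
 */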

.macro save_dar_dsisr_on_stack reg1, reg2, sp
#ifndef CONFIG_VMAP_STACK
	mfspr	\reg1, SPRN_DAR
	mfspr	\reg2, SPRN_DSISR
	stw	\reg1, _DAR(\sp)
	stw	\reg2, _DSISR(\sp)
#endif
.endm

.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
#ifdef CONFIG_VMAP_STACK
	lwz	\reg1, _DAR(\sp)
	lwz	\reg2, _DSISR(\sp)
#else
	save_dar_dsisr_on_stack \reg1, \reg2, \sp
#endif
.endm
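
/*
 * With CONFIG_VMAP_STACK the prolog has already copied DAR/DSISR into
 * the exception frame (via thread_struct), so they are reloaded from
 * there; otherwise they are still live in the SPRs and are read and
 * saved here.
 */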

.macro tovirt_vmstack dst, src
#ifdef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tovirt_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tophys_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tophys(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm
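
/*
 * The three helpers above do the physical/virtual conversion only for
 * the stack configuration that needs it, and degenerate into a plain
 * register move (or nothing at all) otherwise, so callers don't need a
 * CONFIG_VMAP_STACK conditional at every use.
 */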

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#ifdef CONFIG_PPC_BOOK3S
#define	START_EXCEPTION(n, label)		\
	. = n;					\
	DO_KVM n;				\
label:

#else
#define	START_EXCEPTION(n, label)		\
	. = n;					\
label:

#endif
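
/*
 * On Book3S, DO_KVM emits the KVM test ahead of the handler body so
 * that an exception taken while a guest is running can be redirected
 * to KVM before the host handler runs.
 */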

#define EXCEPTION(n, label, hdlr, xfer)		\
	START_EXCEPTION(n, label)		\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)		\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	LOAD_REG_IMMEDIATE(r10, msr);				\
	bl	tfer;						\
	.long	hdlr;						\
	.long	ret

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)
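
/*
 * Illustrative expansion (a sketch): a simple trap vector such as
 *
 *	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
 *
 * places the prolog at 0xa00 and transfers to unknown_exception() with
 * a full register frame.  EXC_XFER_LITE passes n+1 as the trap number,
 * the low bit flagging that the non-volatile GPRs are not saved.
 */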

.macro vmap_stack_overflow_exception
#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_SMP
	mfspr	r11, SPRN_SPRG_THREAD
	tovirt(r11, r11)
	lwz	r11, TASK_CPU - THREAD(r11)
	slwi	r11, r11, 2	/* emergency_ctx[] entries are 32-bit pointers */
	addis	r11, r11, emergency_ctx@ha
#else
	lis	r11, emergency_ctx@ha
#endif
	lwz	r11, emergency_ctx@l(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 1f
	lis	r11, init_thread_union@ha
	addi	r11, r11, init_thread_union@l
1:	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
	EXCEPTION_PROLOG_2
	SAVE_NVGPRS(r11)
	addi	r3, r1, STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0, stack_overflow_exception)
#endif
.endm
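
/*
 * The overflow path above switches to a per-CPU emergency stack,
 * falling back to init_thread_union while the emergency_ctx[] entry is
 * still NULL early in boot, so that the overflow report itself runs on
 * a valid stack.
 */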

#endif /* __HEAD_32_H__ */