/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HEAD_32_H__
#define __HEAD_32_H__

#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */

/*
 * MSR_KERNEL is > 0x8000 on 4xx/Book-E since it includes MSR_CE,
 * so it does not fit in the signed 16-bit immediate of a single 'li'.
 */
.macro __LOAD_MSR_KERNEL r, x
.if \x >= 0x8000
	lis \r, (\x)@h
	ori \r, \r, (\x)@l
.else
	li \r, (\x)
.endif
.endm
#define LOAD_MSR_KERNEL(r, x) __LOAD_MSR_KERNEL r, x
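
/*
 * Usage sketch (illustrative only): an invocation such as
 *
 *	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
 *
 * assembles to a single "li r10, MSR_KERNEL" when the value is below
 * 0x8000, and to "lis r10, MSR_KERNEL@h" followed by
 * "ori r10, r10, MSR_KERNEL@l" otherwise (4xx/Book-E, where MSR_CE
 * pushes MSR_KERNEL past the reach of a signed 16-bit immediate).
 */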

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume SPRN_SPRG_THREAD has the physical address of the current
 * task's thread_struct.
 */

.macro EXCEPTION_PROLOG
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfcr	r10
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
.endm

.macro EXCEPTION_PROLOG_1
	mfspr	r11,SPRN_SRR1		/* check whether user or kernel */
	andi.	r11,r11,MSR_PR
	tophys(r11,r1)			/* use tophys(r1) if kernel */
	beq	1f
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,TASK_STACK-THREAD(r11)
	addi	r11,r11,THREAD_SIZE
	tophys(r11,r11)
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
.endm

.macro EXCEPTION_PROLOG_2
	stw	r10,_CCR(r11)		/* save registers */
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	stw	r10,GPR10(r11)
	mfspr	r12,SPRN_SPRG_SCRATCH1
	stw	r12,GPR11(r11)
	mflr	r10
	stw	r10,_LINK(r11)
	mfspr	r12,SPRN_SRR0
	mfspr	r9,SPRN_SRR1
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt(r1,r11)			/* set new kernel sp */
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
	MTMSRD(r10)			/* (except for mach check in rtas) */
#endif
	stw	r0,GPR0(r11)
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r10,8(r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
.endm
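
/*
 * State on exit from EXCEPTION_PROLOG (summary of the code above):
 * r11 holds the physical address of the freshly allocated exception
 * frame, r1 is the virtual kernel stack pointer, r12 holds SRR0 and
 * r9 holds SRR1 (not yet saved to _NIP/_MSR), and cr0.eq is set when
 * the exception came from kernel mode.
 */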

.macro SYSCALL_ENTRY trapno
	mfspr	r12,SPRN_SPRG_THREAD
	mfcr	r10
	lwz	r11,TASK_STACK-THREAD(r12)
	mflr	r9
	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE
	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
	tophys(r11,r11)
	stw	r10,_CCR(r11)		/* save registers */
	mfspr	r10,SPRN_SRR0
	stw	r9,_LINK(r11)
	mfspr	r9,SPRN_SRR1
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt(r1,r11)			/* set new kernel sp */
	stw	r10,_NIP(r11)
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
	LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
	MTMSRD(r10)			/* (except for mach check in rtas) */
#endif
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	stw	r2,GPR2(r11)
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r11)
	li	r2, \trapno + 1
	stw	r10,8(r11)
	stw	r2,_TRAP(r11)
	SAVE_GPR(0, r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
	addi	r11,r1,STACK_FRAME_OVERHEAD
	addi	r2,r12,-THREAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#if defined(CONFIG_40x)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

3:
	tovirt(r2, r2)			/* set r2 to current */
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If MSR is changing, we need to keep interrupts disabled at this
	 * point; otherwise we risk taking an interrupt before we tell
	 * lockdep they are enabled.
	 */
	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
	rlwimi	r10, r9, 0, MSR_EE
#else
	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	SYNC
	RFI				/* jump to handler, enable MMU */
.endm
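
/*
 * Typical use (illustrative sketch only; the real vector lives in the
 * per-platform head_*.S files and the label shown here is an
 * assumption):
 *
 *	. = 0xc00
 * SystemCall:
 *	SYSCALL_ENTRY	0xc00
 *
 * i.e. the macro is invoked straight from the system call vector with
 * the trap number as its argument.
 */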

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#ifdef CONFIG_PPC_BOOK3S
#define	START_EXCEPTION(n, label)		\
	. = n;					\
	DO_KVM n;				\
label:

#else
#define	START_EXCEPTION(n, label)		\
	. = n;					\
label:

#endif
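
/*
 * Illustrative sketch only (the vector name and handler are
 * hypothetical; real vectors are defined in the head_*.S files): a
 * vector that needs a hand-written body opens with START_EXCEPTION and
 * uses the prolog/transfer helpers directly, e.g.
 *
 *	START_EXCEPTION(0x300, DataAccess)
 *	EXCEPTION_PROLOG
 *	... vector-specific handling ...
 *	EXC_XFER_LITE(0x300, handle_page_fault)
 */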

#define EXCEPTION(n, label, hdlr, xfer)		\
	START_EXCEPTION(n, label)		\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)		\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	LOAD_MSR_KERNEL(r10, msr);				\
	bl	tfer;						\
	.long	hdlr;						\
	.long	ret

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)
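
/*
 * Illustrative sketch only (the vector name is hypothetical; actual
 * vectors live in the head_*.S files): a generic vector can be declared
 * in one line, e.g.
 *
 *	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
 *
 * which places the code at offset 0xa00, runs EXCEPTION_PROLOG, points
 * r3 at the saved register area and transfers to unknown_exception()
 * via transfer_to_handler_full/ret_from_except_full.
 */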

#endif /* __HEAD_32_H__ */