xref: /openbmc/linux/arch/xtensa/kernel/coprocessor.S (revision cfbb9be8)
/*
 * arch/xtensa/kernel/coprocessor.S
 *
 * Xtensa processor configuration-specific table of coprocessor and
 * other custom register layout information.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2007 Tensilica Inc.
 */
13
14
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>

#if XTENSA_HAVE_COPROCESSORS
30
/*
 * Macros for lazy coprocessor context switch.
 */
34
/*
 * Emit the register-save stub for coprocessor 'x'.  The stub stores the
 * coprocessor register file to the save area addressed by a2 (a4..a7 are
 * scratch for the HAL macro) and "returns" through a0 via jx.  If the
 * coprocessor is not configured, the stub is just the return jump.
 */
#define SAVE_CP_REGS(x)							\
	.align 4;							\
	.Lsave_cp_regs_cp##x:						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
	.endif;								\
	jx	a0
42
/*
 * Emit one 8-byte jump-table entry for coprocessor 'x':
 *   .long  stub offset relative to .Lsave_cp_regs_jump_table
 *          (0 when the coprocessor is not configured)
 *   .long  per-thread save-area offset (THREAD_XTREGS_CPx)
 * Because each entry is 8 bytes, callers index the table with addx8.
 */
#define SAVE_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x
50
51
/*
 * Emit the register-load stub for coprocessor 'x'.  The stub loads the
 * coprocessor register file from the save area addressed by a2 (a4..a7
 * are scratch for the HAL macro) and "returns" through a0 via jx.
 */
#define LOAD_CP_REGS(x)							\
	.align 4;							\
	.Lload_cp_regs_cp##x:						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
	.endif;								\
	jx	a0
59
/*
 * Emit one 8-byte jump-table entry for coprocessor 'x' (load direction):
 * stub offset relative to .Lload_cp_regs_jump_table (0 if the coprocessor
 * is not configured), followed by the per-thread save-area offset.
 * Entries are 8 bytes wide, so the table must be indexed with addx8.
 */
#define LOAD_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x
67
/*
 * Instantiate the save/load stubs for all eight possible coprocessors,
 * then build the two jump tables.  Each table entry is 8 bytes
 * (stub offset, thread save-area offset), which is why all code that
 * indexes these tables must use addx8.
 */
	SAVE_CP_REGS(0)
	SAVE_CP_REGS(1)
	SAVE_CP_REGS(2)
	SAVE_CP_REGS(3)
	SAVE_CP_REGS(4)
	SAVE_CP_REGS(5)
	SAVE_CP_REGS(6)
	SAVE_CP_REGS(7)

	LOAD_CP_REGS(0)
	LOAD_CP_REGS(1)
	LOAD_CP_REGS(2)
	LOAD_CP_REGS(3)
	LOAD_CP_REGS(4)
	LOAD_CP_REGS(5)
	LOAD_CP_REGS(6)
	LOAD_CP_REGS(7)

	.align 4
.Lsave_cp_regs_jump_table:
	SAVE_CP_REGS_TAB(0)
	SAVE_CP_REGS_TAB(1)
	SAVE_CP_REGS_TAB(2)
	SAVE_CP_REGS_TAB(3)
	SAVE_CP_REGS_TAB(4)
	SAVE_CP_REGS_TAB(5)
	SAVE_CP_REGS_TAB(6)
	SAVE_CP_REGS_TAB(7)

.Lload_cp_regs_jump_table:
	LOAD_CP_REGS_TAB(0)
	LOAD_CP_REGS_TAB(1)
	LOAD_CP_REGS_TAB(2)
	LOAD_CP_REGS_TAB(3)
	LOAD_CP_REGS_TAB(4)
	LOAD_CP_REGS_TAB(5)
	LOAD_CP_REGS_TAB(6)
	LOAD_CP_REGS_TAB(7)
106
/*
 * coprocessor_save(buffer, index)
 *                    a2      a3
 * coprocessor_load(buffer, index)
 *                    a2      a3
 *
 * Save or load coprocessor registers for coprocessor 'index'.
 * The register values are saved to or loaded from the 'buffer' address.
 *
 * Note that these functions don't update the coprocessor_owner information!
 *
 */
119
ENTRY(coprocessor_save)

	/* Windowed ABI: a2 = buffer, a3 = coprocessor index. */
	entry	a1, 32
	s32i	a0, a1, 0		# a0 is clobbered by callx0 below
	movi	a0, .Lsave_cp_regs_jump_table
	addx8	a3, a3, a0		# table entries are 8 bytes wide
	l32i	a3, a3, 0		# a3: stub offset (0 if CP not configured)
	beqz	a3, 1f			# nothing to save for an absent CP
	add	a0, a0, a3		# a0: absolute address of the save stub
	callx0	a0			# stub stores regs to [a2], returns via a0
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_save)
134
ENTRY(coprocessor_load)

	/* Windowed ABI: a2 = buffer, a3 = coprocessor index. */
	entry	a1, 32
	s32i	a0, a1, 0		# a0 is clobbered by callx0 below
	movi	a0, .Lload_cp_regs_jump_table
	/*
	 * BUGFIX: the jump table has 8-byte entries (stub offset +
	 * THREAD_XTREGS offset), so it must be indexed with addx8,
	 * not addx4 — addx4 read the wrong word for any index > 0.
	 */
	addx8	a3, a3, a0
	l32i	a3, a3, 0		# a3: stub offset (0 if CP not configured)
	beqz	a3, 1f			# nothing to load for an absent CP
	add	a0, a0, a3		# a0: absolute address of the load stub
	callx0	a0			# stub loads regs from [a2], returns via a0
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_load)
149
/*
 * coprocessor_flush(struct thread_info*, index)
 *                             a2           a3
 * coprocessor_restore(struct thread_info*, index)
 *                              a2            a3
 *
 * Save or load coprocessor registers for coprocessor 'index'.
 * The register values are saved to or loaded from the coprocessor area
 * inside the thread_info structure.
 *
 * Note that these functions don't update the coprocessor_owner information!
 *
 */
163
164
ENTRY(coprocessor_flush)

	/* Windowed ABI: a2 = thread_info pointer, a3 = coprocessor index. */
	entry	a1, 32
	s32i	a0, a1, 0		# a0 is clobbered by callx0 below
	movi	a0, .Lsave_cp_regs_jump_table
	addx8	a3, a3, a0		# table entries are 8 bytes wide
	l32i	a4, a3, 4		# a4: xtregs offset within thread_info
	l32i	a3, a3, 0		# a3: stub offset (0 if CP not configured)
	add	a2, a2, a4		# a2: address of the CP save area
	beqz	a3, 1f			# nothing to save for an absent CP
	add	a0, a0, a3		# a0: absolute address of the save stub
	callx0	a0			# stub stores regs to [a2], returns via a0
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_flush)
181
ENTRY(coprocessor_restore)

	/* Windowed ABI: a2 = thread_info pointer, a3 = coprocessor index. */
	entry	a1, 32
	s32i	a0, a1, 0		# a0 is clobbered by callx0 below
	movi	a0, .Lload_cp_regs_jump_table
	/*
	 * BUGFIX: entries are 8 bytes (we read fields at offsets 0 AND 4
	 * below), so the index scale must be addx8, not addx4.
	 */
	addx8	a3, a3, a0
	l32i	a4, a3, 4		# a4: xtregs offset within thread_info
	l32i	a3, a3, 0		# a3: stub offset (0 if CP not configured)
	add	a2, a2, a4		# a2: address of the CP save area
	beqz	a3, 1f			# nothing to load for an absent CP
	add	a0, a0, a3		# a0: absolute address of the load stub
	callx0	a0			# stub loads regs from [a2], returns via a0
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_restore)
197
/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
211
ENTRY(fast_coprocessor_double)

	/*
	 * A double exception inside the coprocessor fast path cannot be
	 * resumed: park a0 in EXCSAVE1 and hand off to the unrecoverable-
	 * exception handler (does not return).
	 */
	wsr	a0, excsave1
	call0	unrecoverable_exception

ENDPROC(fast_coprocessor_double)
218
ENTRY(fast_coprocessor)

	/* Save remaining registers a1-a3 and SAR */

	s32i	a3, a2, PT_AREG3
	rsr	a3, sar
	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_SAR
	mov	a1, a2			# a1 now points at the exception frame
	rsr	a2, depc		# original a2 was parked in DEPC on entry
	s32i	a2, a1, PT_AREG2

	/*
	 * The hal macros require up to 4 temporary registers. We use a3..a6.
	 */

	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/

	ssl	a3			# SAR: 32 - coprocessor_number
	movi	a2, 1
	rsr	a0, cpenable
	sll	a2, a2			# a2 = 1 << cp-index
	or	a0, a0, a2
	wsr	a0, cpenable
	rsync

	/* Retrieve previous owner. (a3 still holds CP number) */

	movi	a0, coprocessor_owner	# list of owners (one pointer per CP)
	addx4	a0, a3, a0		# entry for CP (4-byte pointer slots)
	l32i	a4, a0, 0

	beqz	a4, 1f			# skip 'save' if no previous owner

	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */

	l32i	a5, a4, THREAD_CPENABLE
	xor	a5, a5, a2		# (1 << cp-id) still in a2
	s32i	a5, a4, THREAD_CPENABLE

	/*
	 * Get context save area and 'call' save routine.
	 * (a4 still holds previous owner (thread_info), a3 CP number)
	 */

	movi	a5, .Lsave_cp_regs_jump_table
	movi	a0, 2f			# a0: 'return' address for the stub
	addx8	a3, a3, a5		# a3: coprocessor number (8-byte entries)
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: previous owner's save area
	add	a4, a3, a5		# a4: address of save routine
	jx	a4

	/* Note that only a0 and a1 were preserved. */

2:	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
	movi	a0, coprocessor_owner
	addx4	a0, a3, a0

	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */

1:	GET_THREAD_INFO (a4, a1)
	s32i	a4, a0, 0

	/* Get context save area and 'call' load routine. */

	movi	a5, .Lload_cp_regs_jump_table
	movi	a0, 1f			# a0: 'return' address for the stub
	addx8	a3, a3, a5		# 8-byte entries, as above
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: new owner's save area
	add	a4, a3, a5		# a4: address of load routine
	jx	a4

	/* Restore all registers and return from exception handler. */

1:	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4

	l32i	a0, a1, PT_SAR
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	wsr	a0, sar
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1

	rfe

ENDPROC(fast_coprocessor)
321
322	.data
323
324ENTRY(coprocessor_owner)
325
326	.fill XCHAL_CP_MAX, 4, 0
327
328END(coprocessor_owner)
329
330#endif /* XTENSA_HAVE_COPROCESSORS */
331