xref: /openbmc/linux/arch/arm/vfp/vfphw.S (revision 2a598d0b)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 *  linux/arch/arm/vfp/vfphw.S
4 *
5 *  Copyright (C) 2004 ARM Limited.
6 *  Written by Deep Blue Solutions Limited.
7 *
8 * This code is called from the kernel's undefined instruction trap.
9 * r1 holds the thread_info pointer
10 * r3 holds the return address for successful handling.
11 * lr holds the return address for unrecognised instructions.
12 * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
13 */
14#include <linux/init.h>
15#include <linux/linkage.h>
16#include <asm/thread_info.h>
17#include <asm/vfpmacros.h>
18#include <linux/kern_levels.h>
19#include <asm/assembler.h>
20#include <asm/asm-offsets.h>
21
@ DBGSTR str
@ Debug-only: print the literal string \str through _printk, prefixed
@ "VFP: " at KERN_DEBUG level.  Expands to nothing unless DEBUG is
@ defined.  All AAPCS caller-clobbered registers (r0-r3, ip, lr) are
@ saved and restored around the call, so the macro may be dropped in
@ at any point of this entry code without disturbing live state.
22	.macro	DBGSTR, str
23#ifdef DEBUG
24	stmfd	sp!, {r0-r3, ip, lr}
25	ldr	r0, =1f			@ r0 = format string emitted below
26	bl	_printk
27	ldmfd	sp!, {r0-r3, ip, lr}
28
29	.pushsection .rodata, "a"
301:	.ascii	KERN_DEBUG "VFP: \str\n"
31	.byte	0			@ NUL terminator for the string
32	.previous
33#endif
34	.endm
35
@ DBGSTR1 str, arg
@ Debug-only: printk "VFP: <str>" with one format argument, moved into
@ r1 before the call.  Caller-clobbered registers (r0-r3, ip, lr) are
@ preserved; the macro compiles away entirely unless DEBUG is defined.
36	.macro  DBGSTR1, str, arg
37#ifdef DEBUG
38	stmfd	sp!, {r0-r3, ip, lr}
39	mov	r1, \arg		@ r1 = first printk argument
40	ldr	r0, =1f			@ r0 = format string emitted below
41	bl	_printk
42	ldmfd	sp!, {r0-r3, ip, lr}
43
44	.pushsection .rodata, "a"
451:	.ascii	KERN_DEBUG "VFP: \str\n"
46	.byte	0			@ NUL terminator for the string
47	.previous
48#endif
49	.endm
50
@ DBGSTR3 str, arg1, arg2, arg3
@ Debug-only: printk "VFP: <str>" with three format arguments in
@ r1-r3.  Arguments are marshalled highest destination register first,
@ presumably so a lower-numbered source register is still intact when
@ it is read.  Caller-clobbered registers are preserved around the
@ call; expands to nothing unless DEBUG is defined.
51	.macro  DBGSTR3, str, arg1, arg2, arg3
52#ifdef DEBUG
53	stmfd	sp!, {r0-r3, ip, lr}
54	mov	r3, \arg3
55	mov	r2, \arg2
56	mov	r1, \arg1
57	ldr	r0, =1f			@ r0 = format string emitted below
58	bl	_printk
59	ldmfd	sp!, {r0-r3, ip, lr}
60
61	.pushsection .rodata, "a"
621:	.ascii	KERN_DEBUG "VFP: \str\n"
63	.byte	0			@ NUL terminator for the string
64	.previous
65#endif
66	.endm
67
68
69@ VFP hardware support entry point.
70@
71@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
72@  r1  = thread_info pointer
73@  r2  = PC value to resume execution after successful emulation
74@  r3  = normal "successful" return address
75@  lr  = unrecognised instruction return address
76@  IRQs enabled.
@
@ NOTE(review): r3 is actually used below as a stack pointer value
@ (mov sp, r3 followed by pop {lr}), not branched to directly - the
@ description above looks stale; confirm against the undef-instruction
@ entry code in entry-armv.S.
@
@ Internal register roles, live through most of this routine:
@   r9  = address of the vfp_current_hw_state pointer array
@   r10 = this thread's VFP state area (thread_info + TI_VFPSTATE)
@   r11 = current CPU number
77ENTRY(vfp_support_entry)
78	ldr	r11, [r1, #TI_CPU]	@ CPU number
79	add	r10, r1, #TI_VFPSTATE	@ r10 = workspace
80
81	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
82
83	.fpu	vfpv2
84	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
85	DBGSTR1	"fpexc %08x", r1
86	tst	r1, #FPEXC_EN
87	bne	look_for_VFP_exceptions	@ VFP is already enabled
88
89	DBGSTR1 "enable %x", r10
90	ldr	r9, vfp_current_hw_state_address
91	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
92	ldr	r4, [r9, r11, lsl #2]	@ vfp_current_hw_state pointer
93	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
94	cmp	r4, r10			@ this thread owns the hw context?
95#ifndef CONFIG_SMP
96	@ For UP, checking that this thread owns the hw context is
97	@ sufficient to determine that the hardware state is valid.
98	beq	vfp_hw_state_valid
99
100	@ On UP, we lazily save the VFP context.  As a different
101	@ thread wants ownership of the VFP hardware, save the old
102	@ state if there was a previous (valid) owner.
103
104	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
105					@ exceptions, so we can get at the
106					@ rest of it
107
108	DBGSTR1	"save old state %p", r4
109	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
110	beq	vfp_reload_hw		@ then the hw state needs reloading
111	VFPFSTMIA r4, r5		@ save the working registers
112	VFPFMRX	r5, FPSCR		@ current status
113#ifndef CONFIG_CPU_FEROCEON
114	tst	r1, #FPEXC_EX		@ is there additional state to save?
115	beq	1f
116	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
117	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
118	beq	1f
119	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
1201:
121#endif
	@ If FPEXC.EX was clear, r6/r8 hold stale values here; that is
	@ harmless, as the restore path only reads them when EX is set.
122	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
123vfp_reload_hw:
124
125#else
126	@ For SMP, if this thread does not own the hw context, then we
127	@ need to reload it.  No need to save the old state as on SMP,
128	@ we always save the state when we switch away from a thread.
129	bne	vfp_reload_hw
130
131	@ This thread has ownership of the current hardware context.
132	@ However, it may have been migrated to another CPU, in which
133	@ case the saved state is newer than the hardware context.
134	@ Check this by looking at the CPU number which the state was
135	@ last loaded onto.
136	ldr	ip, [r10, #VFP_CPU]
137	teq	ip, r11
138	beq	vfp_hw_state_valid
139
140vfp_reload_hw:
141	@ We're loading this thread's state into the VFP hardware. Update
142	@ the CPU number which contains the most up to date VFP context.
143	str	r11, [r10, #VFP_CPU]
144
145	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
146					@ exceptions, so we can get at the
147					@ rest of it
148#endif
149
150	DBGSTR1	"load state %p", r10
151	str	r10, [r9, r11, lsl #2]	@ update the vfp_current_hw_state pointer
152					@ Load the saved state back into the VFP
153	VFPFLDMIA r10, r5		@ reload the working registers while
154					@ FPEXC is in a safe state
155	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
156#ifndef CONFIG_CPU_FEROCEON
157	tst	r1, #FPEXC_EX		@ is there additional state to restore?
158	beq	1f
159	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
160	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
161	beq	1f
162	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
1631:
164#endif
165	VFPFMXR	FPSCR, r5		@ restore status
166
167@ The context stored in the VFP hardware is up to date with this thread
168vfp_hw_state_valid:
169	tst	r1, #FPEXC_EX
170	bne	process_exception	@ might as well handle the pending
171					@ exception before retrying branch
172					@ out before setting an FPEXC that
173					@ stops us reading stuff
174	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
175	mov	sp, r3			@ we think we have handled things
176	pop	{lr}
177	sub	r2, r2, #4		@ Retry current instruction - if Thumb
178	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
179					@ else it's one 32-bit instruction, so
180					@ always subtract 4 from the following
181					@ instruction address.
182
183local_bh_enable_and_ret:
184	adr	r0, .			@ r0 = this address, the "ip" argument
					@ used for softirq accounting/lockdep;
					@ presumably pairs with a bh-disable in
					@ the caller - confirm in entry code
185	mov	r1, #SOFTIRQ_DISABLE_OFFSET
186	b	__local_bh_enable_ip	@ tail call
187
188look_for_VFP_exceptions:
189	@ Check for synchronous or asynchronous exception
190	tst	r1, #FPEXC_EX | FPEXC_DEX
191	bne	process_exception
192	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
193	@ causes all the CDP instructions to be bounced synchronously without
194	@ setting the FPEXC.EX bit
195	VFPFMRX	r5, FPSCR
196	tst	r5, #FPSCR_IXE
197	bne	process_exception
198
	@ A non-zero FPSCR length field with no exception bit set also
	@ needs handling: flag it as a bounce via FPEXC_DEX.
199	tst	r5, #FPSCR_LENGTH_MASK
200	beq	skip
201	orr	r1, r1, #FPEXC_DEX
202	b	process_exception
203skip:
204
205	@ Fall through: hand on to the next handler - appropriate coproc
206	@ instr not recognised by VFP
207
208	DBGSTR	"not VFP"
209	b	local_bh_enable_and_ret
210
211process_exception:
212	DBGSTR	"bounce"
213	mov	sp, r3			@ setup for a return to the user code.
214	pop	{lr}
215	mov	r2, sp			@ nothing stacked - regdump is at TOS
216
217	@ Now call the C code to package up the bounce to the support code
218	@   r0 holds the trigger instruction
219	@   r1 holds the FPEXC value
220	@   r2 pointer to register dump
221	b	VFP_bounce		@ we have handled this - the support
222					@ code will raise an exception if
223					@ required. If not, the user code will
224					@ retry the faulted instruction
225ENDPROC(vfp_support_entry)
226
@ vfp_save_state(location, fpexc)
@ Save the full VFP context to r0: the working registers first, then
@ the block {FPEXC, FPSCR, FPINST, FPINST2}.  FPINST/FPINST2 are only
@ read back when the FPEXC value passed in r1 marks them valid (EX and
@ FP2V bits); otherwise the stored slots carry stale values that the
@ restore path will ignore for the same reason.
@ In:    r0 = save location, r1 = FPEXC
@ Clobb: r2, r3, r12 (r0 appears to be advanced past the register dump
@        by VFPFSTMIA's writeback - see vfpmacros.h to confirm)
227ENTRY(vfp_save_state)
228	@ Save the current VFP state
229	@ r0 - save location
230	@ r1 - FPEXC
231	DBGSTR1	"save VFP state %p", r0
232	VFPFSTMIA r0, r2		@ save the working registers
233	VFPFMRX	r2, FPSCR		@ current status
234	tst	r1, #FPEXC_EX		@ is there additional state to save?
235	beq	1f
236	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
237	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
238	beq	1f
239	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
2401:
241	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
242	ret	lr
243ENDPROC(vfp_save_state)
244
@ Literal word holding the address of vfp_current_hw_state (indexed by
@ CPU number in vfp_support_entry), placed here so it is reachable by
@ the pc-relative "ldr r9, vfp_current_hw_state_address" above.
245	.align
246vfp_current_hw_state_address:
247	.word	vfp_current_hw_state
248
@ tbl_branch base, tmp, shift
@ Computed branch into the table of (1 << \shift)-byte entries that
@ immediately follows the macro expansion: jump to entry number \base.
@ \tmp is scratch (only clobbered on Thumb2 kernels).
249	.macro	tbl_branch, base, tmp, shift
250#ifdef CONFIG_THUMB2_KERNEL
251	adr	\tmp, 1f		@ \tmp = table base (label 1 below)
252	add	\tmp, \tmp, \base, lsl \shift
253	ret	\tmp
254#else
	@ In ARM state, reading pc yields this instruction's address + 8,
	@ which is already the first table entry, so entry 0 needs no
	@ offset; the mov below merely fills the intervening pc+4 slot
	@ and is never executed.
255	add	pc, pc, \base, lsl \shift
256	mov	r0, r0
257#endif
2581:
259	.endm
260
@ vfp_get_float
@ In:  r0 = single-precision register number (0-31)
@ Out: r0 = raw contents of s<r0>
@ Each .irp entry is padded to exactly 8 bytes (.org 1b + 8) so that
@ tbl_branch can index the table with r0 << 3.
@ Clobbers r3 (tbl_branch scratch) on Thumb2 kernels.
261ENTRY(vfp_get_float)
262	tbl_branch r0, r3, #3
263	.fpu	vfpv2
264	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2651:	vmov	r0, s\dr
266	ret	lr
267	.org	1b + 8			@ enforce 8-byte table stride
268	.endr
269	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
2701:	vmov	r0, s\dr
271	ret	lr
272	.org	1b + 8			@ enforce 8-byte table stride
273	.endr
274ENDPROC(vfp_get_float)
275
@ vfp_put_float
@ In: r0 = value, r1 = single-precision register number (0-31)
@ Writes r0 into s<r1>.  Table of 8-byte entries (.org 1b + 8) indexed
@ by tbl_branch with r1 << 3; clobbers r3 on Thumb2 kernels.
276ENTRY(vfp_put_float)
277	tbl_branch r1, r3, #3
278	.fpu	vfpv2
279	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2801:	vmov	s\dr, r0
281	ret	lr
282	.org	1b + 8			@ enforce 8-byte table stride
283	.endr
284	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
2851:	vmov	s\dr, r0
286	ret	lr
287	.org	1b + 8			@ enforce 8-byte table stride
288	.endr
289ENDPROC(vfp_put_float)
290
@ vfp_get_double
@ In:  r0 = double-precision register number
@ Out: r0 = low 32 bits, r1 = high 32 bits of d<r0>
@      (vmov rlow, rhigh, d<n> operand ordering)
@ Table of 8-byte entries (.org 1b + 8) indexed by tbl_branch with
@ r0 << 3; clobbers r3 on Thumb2 kernels.  Without CONFIG_VFPv3,
@ indices past 15 fall through to the zero-return tail below.
291ENTRY(vfp_get_double)
292	tbl_branch r0, r3, #3
293	.fpu	vfpv2
294	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2951:	vmov	r0, r1, d\dr
296	ret	lr
297	.org	1b + 8			@ enforce 8-byte table stride
298	.endr
299#ifdef CONFIG_VFPv3
300	@ d16 - d31 registers
301	.fpu	vfpv3
302	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
3031:	vmov	r0, r1, d\dr
304	ret	lr
305	.org	1b + 8			@ enforce 8-byte table stride
306	.endr
307#endif
308
309	@ virtual register 16 (or 32 if VFPv3) for compare with zero
310	mov	r0, #0
311	mov	r1, #0
312	ret	lr
313ENDPROC(vfp_get_double)
314
@ vfp_put_double
@ In: r0 = low 32 bits, r1 = high 32 bits, r2 = double-precision
@     register number
@ Writes the r1:r0 pair into d<r2>.  Table of 8-byte entries
@ (.org 1b + 8) indexed by tbl_branch with r2 << 3; clobbers r3 on
@ Thumb2 kernels.
315ENTRY(vfp_put_double)
316	tbl_branch r2, r3, #3
317	.fpu	vfpv2
318	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
3191:	vmov	d\dr, r0, r1
320	ret	lr
321	.org	1b + 8			@ enforce 8-byte table stride
322	.endr
323#ifdef CONFIG_VFPv3
324	.fpu	vfpv3
325	@ d16 - d31 registers
326	.irp	dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
3271:	vmov	d\dr, r0, r1
328	ret	lr
329	.org	1b + 8			@ enforce 8-byte table stride
330	.endr
331#endif
332ENDPROC(vfp_put_double)
333