/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

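/*
 * vfp_vector is the hook through which the undef-instruction trap is
 * dispatched; last_VFP_context[] records, for each CPU, which thread's
 * VFP state currently lives in that CPU's registers (lazy switching).
 */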
void (*vfp_vector)(void) = vfp_null_entry;
union vfp_state *last_VFP_context[NR_CPUS];

/*
 * Dual-use variable.
 * Used during startup: set to non-zero if the VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
unsigned int VFP_arch;

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of last_VFP_context[] and hardware disable
	 * are done for the same CPU and without preemption.
	 */
	cpu = get_cpu();
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();
}

/*
 * Per-thread VFP cleanup (thread release).
 */
static void vfp_thread_exit(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	put_cpu();
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		unsigned int cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

	if (cmd == THREAD_NOTIFY_FLUSH)
		vfp_thread_flush(thread);
	else
		vfp_thread_exit(thread);

	return NOTIFY_DONE;
}

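/* Registered on the thread-notify chain from vfp_init(). */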
static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
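	/* The PC has moved past the faulting instruction; point back at it. */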
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * Update the FPSCR with the additional exception flags.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

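/*
 * Record a si_code only when the exception has occurred (cumulative
 * status bit set in "exceptions") and the user has enabled trapping
 * for it (matching enable bit set in FPSCR).
 */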
#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

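		/*
		 * Take the iteration count from FPEXC and add one before
		 * transferring it into FPSCR's LEN field (the two fields
		 * appear to use offset encodings).
		 */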
		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}

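/*
 * Runs on each CPU (directly during boot and via smp_call_function()
 * from vfp_init()), since coprocessor access control is per-CPU state.
 */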
static void vfp_enable(void *unused)
{
	u32 access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

#ifdef CONFIG_PM
#include <linux/sysdev.h>

static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* clear any information we had about last context state */
	memset(last_VFP_context, 0, sizeof(last_VFP_context));

	return 0;
}

static int vfp_pm_resume(struct sys_device *dev)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

	return 0;
}

static struct sysdev_class vfp_pm_sysclass = {
	.name		= "vfp",
	.suspend	= vfp_pm_suspend,
	.resume		= vfp_pm_resume,
};

static struct sys_device vfp_pm_sysdev = {
	.cls	= &vfp_pm_sysclass,
};

static void vfp_pm_init(void)
{
	sysdev_class_register(&vfp_pm_sysclass);
	sysdev_register(&vfp_pm_sysdev);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */

/*
 * Synchronise the hardware VFP state of a thread other than current with the
 * saved one. This function is used by the ptrace mechanism.
 */
#ifdef CONFIG_SMP
void vfp_sync_state(struct thread_info *thread)
{
	/*
	 * On SMP systems, the VFP state is automatically saved at every
	 * context switch. We mark the thread VFP state as belonging to a
	 * non-existent CPU so that the saved one will be reloaded when
	 * needed.
	 */
	thread->vfpstate.hard.cpu = NR_CPUS;
}
#else
void vfp_sync_state(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();
	u32 fpexc = fmrx(FPEXC);

	/*
	 * If VFP is enabled, the previous state was already saved and
	 * last_VFP_context updated.
	 */
	if (fpexc & FPEXC_EN)
		goto out;

	if (!last_VFP_context[cpu])
		goto out;

	/*
	 * Save the last VFP state on this CPU.
	 */
	fmxr(FPEXC, fpexc | FPEXC_EN);
	vfp_save_state(last_VFP_context[cpu], fpexc);
	fmxr(FPEXC, fpexc);

	/*
	 * Set the context to NULL to force a reload the next time the thread
	 * uses the VFP.
	 */
	last_VFP_context[cpu] = NULL;

out:
	put_cpu();
}
#endif

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

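	/* ARMv6 and later gate VFP behind the coprocessor access register. */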
	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;
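
	/*
	 * If no VFP hardware is present, the FPSID read above undefs into
	 * vfp_testing_entry, which flags the failure by setting VFP_arch
	 * to a non-zero value.
	 */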
	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch) {
		printk("not present\n");
	} else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		smp_call_function(vfp_enable, NULL, 1);

		/* Extract the architecture version. */
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 3) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
#ifdef CONFIG_NEON
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations.
		 */
		if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
			elf_hwcap |= HWCAP_NEON;
#endif
	}
	return 0;
}

late_initcall(vfp_init);