// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

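/*
 * Prepare to use VMX for a copy to/from user space. Returns 1 if VMX
 * may be used, in which case the caller must finish with
 * exit_vmx_usercopy(); returns 0 (e.g. in interrupt context) if the
 * caller should fall back to the plain, non-VMX copy.
 */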
int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();
	/*
	 * Must never explicitly call schedule (including preempt_enable())
	 * while in a kuap-unlocked user copy, because the AMR register will
	 * not be saved and restored across context switch. However preempt
	 * kernels need to be preempted as soon as possible if need_resched is
	 * set and we are preemptible. The hack here is to schedule a
	 * decrementer to fire here and reschedule for us if necessary.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
		set_dec(1);
	return 0;
}
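
/*
 * Illustrative sketch only (not part of this file): the real caller,
 * __copy_tofrom_user_power7, is written in assembly, but the pairing of
 * the two helpers above amounts to roughly the following, where
 * vmx_copy() and plain_copy() are hypothetical names:
 *
 *	static unsigned long vmx_copy(void *to, const void *from,
 *				      unsigned long n)
 *	{
 *		if (!enter_vmx_usercopy())
 *			return plain_copy(to, from, n);	// fall back, no VMX
 *		... copy with VMX, bailing out to the fallback on page fault ...
 *		return exit_vmx_usercopy();		// tail call: returns 0
 *	}
 */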

int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
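
/*
 * Illustrative sketch only (not part of this file): the real callers are
 * assembly memcpy-style routines, but the intended pairing with
 * enter_vmx_ops()/exit_vmx_ops() looks roughly like this, where
 * vmx_memcpy() and plain_memcpy() are hypothetical names:
 *
 *	static void *vmx_memcpy(void *dest, const void *src, unsigned long n)
 *	{
 *		if (!enter_vmx_ops())
 *			return plain_memcpy(dest, src, n);	// non-VMX fallback
 *		... copy n bytes from src to dest using VMX ...
 *		return exit_vmx_ops(dest);	// tail call: hands back dest
 *	}
 */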