/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_FP_H
#define __ASM_FP_H

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/types.h>

#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK	0xf800009f
#define VFP_FPSCR_CTRL_MASK	0x07f79f00
/*
 * The VFP state has 32x64-bit registers and a single 32-bit
 * control/status register.
 */
#define VFP_STATE_SIZE		((32 * 8) + 4)
#endif
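
/*
 * Illustrative sketch only (not part of the kernel API): a compat
 * task's FPSCR value splits into the architectural FPSR (status) and
 * FPCR (control) views using the masks above.  The helper name
 * vfp_split_fpscr() is hypothetical.
 *
 *	static inline void vfp_split_fpscr(u32 fpscr, u32 *fpsr, u32 *fpcr)
 *	{
 *		*fpsr = fpscr & VFP_FPSCR_STAT_MASK;
 *		*fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
 *	}
 */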

struct task_struct;

extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);

extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);

extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);

extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
				     void *sve_state, unsigned int sve_vl);

extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);

/* Maximum VL that SVE VL-agnostic software can transparently support */
#define SVE_VL_ARCH_MAX 0x100

/* Offset of FFR in the SVE register dump */
static inline size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

static inline void *sve_pffr(struct thread_struct *thread)
{
	return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
}
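
/*
 * Worked example (illustrative only): for 512-bit vectors,
 * thread->sve_vl == 64 bytes and sve_vq_from_vl(64) == 4.  The dump at
 * thread->sve_state then holds 32 Z registers of 4 * 16 == 64 bytes
 * each (2048 bytes in total), followed by 16 P registers of 4 * 2 == 8
 * bytes each (128 bytes), so sve_ffr_offset(64) == 2176 and sve_pffr()
 * points at the FFR slot directly after the predicate registers.
 */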

extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
extern void sve_flush_live(unsigned long vq_minus_1);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
				       unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
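
/*
 * Sketch of the calling convention (illustrative; local names are
 * hypothetical): the low-level helpers above take the vector length as
 * "VQ - 1", i.e. the number of 128-bit quadwords minus one, matching
 * the ZCR_ELx.LEN field encoding.  Given a vector length vl in bytes,
 * a caller would derive it as:
 *
 *	unsigned long vq_minus_1 = sve_vq_from_vl(vl) - 1;
 *
 *	sve_set_vq(vq_minus_1);
 */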

struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);

extern u64 read_zcr_features(void);

extern int __ro_after_init sve_max_vl;
extern int __ro_after_init sve_max_virtualisable_vl;
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}

/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool sve_vq_available(unsigned int vq)
{
	return test_bit(__vq_to_bit(vq), sve_vq_map);
}
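
/*
 * Sketch (for illustration; mirrors how fpsimd.c consumes these
 * helpers): because bit 0 corresponds to SVE_VQ_MAX and higher bit
 * indices to smaller VQs, a forward bitmap search yields the largest
 * supported VQ that does not exceed a requested one, assuming some VQ
 * at or below the requested value is supported at all.
 *
 *	unsigned int bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
 *					 __vq_to_bit(vq));
 *	unsigned int best_vq = __bit_to_vq(bit);
 */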

#ifdef CONFIG_ARM64_SVE

extern size_t sve_state_size(struct task_struct const *task);

extern void sve_alloc(struct task_struct *task);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);

extern int sve_set_vector_length(struct task_struct *task,
				 unsigned long vl, unsigned long flags);

extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);

static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}

static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}

#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
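
/*
 * Usage sketch (illustrative only): update the LEN field of a ZCR
 * register, skipping the register write when the field already holds
 * the wanted value, e.g.:
 *
 *	sve_cond_update_zcr_vq(vq - 1, SYS_ZCR_EL1);
 *
 * The value operand supplies the ZCR_ELx_LEN_MASK bits, i.e. "VQ - 1";
 * SYS_ZCR_EL1 comes from <asm/sysreg.h>.
 */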

/*
 * Probing and setup functions.
 * Calls to these functions must be serialised with one another.
 */
extern void __init sve_init_vq_map(void);
extern void sve_update_vq_map(void);
extern int sve_verify_vq_map(void);
extern void __init sve_setup(void);

#else /* ! CONFIG_ARM64_SVE */

static inline void sve_alloc(struct task_struct *task) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

#define sve_cond_update_zcr_vq(val, reg) do { } while (0)

static inline void sve_init_vq_map(void) { }
static inline void sve_update_vq_map(void) { }
static inline int sve_verify_vq_map(void) { return 0; }
static inline void sve_setup(void) { }

#endif /* ! CONFIG_ARM64_SVE */

/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);

#endif /* ! __ASSEMBLY__ */

#endif /* __ASM_FP_H */