// SPDX-License-Identifier: GPL-2.0
/*
 * In-kernel vector facility support functions
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>
#include <asm/vx-insn.h>

void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
	/*
	 * Limit the save to the FPU/vector registers already
	 * in use by the previous context.
	 */
	flags &= state->mask;

	if (flags & KERNEL_FPC)
		/* Save floating-point control */
		asm volatile("stfpc %0" : "=Q" (state->fpc));

	if (!MACHINE_HAS_VX) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Save floating-point registers */
			asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
			asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
			asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
			asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
			asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
			asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
			asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
			asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
			asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
			asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
			asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
			asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
			asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
			asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
			asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
			asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
		}
		return;
	}

	/* Test and save vector registers */
	asm volatile (
		/*
		 * Test if any vector register must be saved and, if so,
		 * test if all registers can be saved.
		 */
		"	la	1,%[vxrs]\n"	/* load save area */
		"	tmll	%[m],30\n"	/* KERNEL_VXR */
		"	jz	7f\n"		/* no work -> done */
		"	jo	5f\n"		/* -> save V0..V31 */
		/*
		 * Test for special case KERNEL_VXR_MID only. In this
		 * case a vstm V8..V23 is the best instruction.
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> save V8..V23 */
		"	VSTM	8,23,128,1\n"	/* vstm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and save the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> save V0..V15 */
		"	brc	2,1f\n"		/* 10 -> save V8..V15 */
		"	VSTM	0,7,0,1\n"	/* vstm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VSTM	8,15,128,1\n"	/* vstm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		/* Test and save the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> save V16..V31 */
		"	brc	2,4f\n"		/* 10 -> save V24..V31 */
		"	VSTM	16,23,256,1\n"	/* vstm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VSTM	24,31,384,1\n"	/* vstm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		"6:	VSTM	16,31,256,1\n"	/* vstm %v16,%v31,256(%r1) */
		"7:"
		: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
		: [m] "d" (flags)
		: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_begin);
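
/*
 * Reading aid for the immediate operands above: they encode the
 * KERNEL_* request masks from <asm/fpu/api.h>, one bit per register
 * bank (values inferred from the mask comments in the assembly):
 *
 *	KERNEL_FPC		0x01
 *	KERNEL_VXR_V0V7		0x02
 *	KERNEL_VXR_V8V15	0x04
 *	KERNEL_VXR_V16V23	0x08
 *	KERNEL_VXR_V24V31	0x10
 *
 * Hence "tmll %[m],30" tests all four vector banks (KERNEL_VXR),
 * "chi %[m],12" matches a request for exactly V8..V23
 * (KERNEL_VXR_MID), and "tmll %[m],6" / "tmll %[m],24" test the
 * low (V0..V15) and high (V16..V31) halves respectively.
 */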

void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
	/*
	 * Limit the restore to the FPU/vector registers of the
	 * previous context that have been overwritten by the
	 * current context.
	 */
	flags &= state->mask;

	if (flags & KERNEL_FPC)
		/* Restore floating-point control */
		asm volatile("lfpc %0" : : "Q" (state->fpc));

	if (!MACHINE_HAS_VX) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Restore floating-point registers */
			asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
			asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
			asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
			asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
			asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
			asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
			asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
			asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
			asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
			asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
			asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
			asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
			asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
			asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
			asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
			asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
		}
		return;
	}

	/* Test and restore (load) vector registers */
	asm volatile (
		/*
		 * Test if any vector register must be loaded and, if so,
		 * test if all registers can be loaded at once.
		 */
		"	la	1,%[vxrs]\n"	/* load restore area */
		"	tmll	%[m],30\n"	/* KERNEL_VXR */
		"	jz	7f\n"		/* no work -> done */
		"	jo	5f\n"		/* -> restore V0..V31 */
		/*
		 * Test for special case KERNEL_VXR_MID only. In this
		 * case a vlm V8..V23 is the best instruction.
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> restore V8..V23 */
		"	VLM	8,23,128,1\n"	/* vlm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and restore the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> restore V0..V15 */
		"	brc	2,1f\n"		/* 10 -> restore V8..V15 */
		"	VLM	0,7,0,1\n"	/* vlm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VLM	8,15,128,1\n"	/* vlm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VLM	0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
		/* Test and restore the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> restore V16..V31 */
		"	brc	2,4f\n"		/* 10 -> restore V24..V31 */
		"	VLM	16,23,256,1\n"	/* vlm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VLM	24,31,384,1\n"	/* vlm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VLM	0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
		"6:	VLM	16,31,256,1\n"	/* vlm %v16,%v31,256(%r1) */
		"7:"
		:
		: [vxrs] "Q" (*(struct vx_array *) &state->vxrs),
		  [m] "d" (flags)
		: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_end);
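
/*
 * Typical use (a sketch; the kernel_fpu_begin()/kernel_fpu_end()
 * wrappers that call the low-level routines above are provided by
 * <asm/fpu/api.h>): callers request only the registers they will
 * clobber, so the save and restore paths can pick the cheapest
 * VSTM/VLM variant:
 *
 *	struct kernel_fpu state;
 *
 *	kernel_fpu_begin(&state, KERNEL_VXR_LOW | KERNEL_FPC);
 *	... code clobbering %v0..%v15 and the floating-point control ...
 *	kernel_fpu_end(&state, KERNEL_VXR_LOW | KERNEL_FPC);
 */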
In this 144 * case a vlm V8..V23 is the best instruction 145 */ 146 " chi %[m],12\n" /* KERNEL_VXR_MID */ 147 " jne 0f\n" /* -> restore V8..V23 */ 148 " VLM 8,23,128,1\n" /* vlm %v8,%v23,128(%r1) */ 149 " j 7f\n" 150 /* Test and restore the first half of 16 vector registers */ 151 "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */ 152 " jz 3f\n" /* -> KERNEL_VXR_HIGH */ 153 " jo 2f\n" /* 11 -> restore V0..V15 */ 154 " brc 2,1f\n" /* 10 -> restore V8..V15 */ 155 " VLM 0,7,0,1\n" /* vlm %v0,%v7,0(%r1) */ 156 " j 3f\n" 157 "1: VLM 8,15,128,1\n" /* vlm %v8,%v15,128(%r1) */ 158 " j 3f\n" 159 "2: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */ 160 /* Test and restore the second half of 16 vector registers */ 161 "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */ 162 " jz 7f\n" 163 " jo 6f\n" /* 11 -> restore V16..V31 */ 164 " brc 2,4f\n" /* 10 -> restore V24..V31 */ 165 " VLM 16,23,256,1\n" /* vlm %v16,%v23,256(%r1) */ 166 " j 7f\n" 167 "4: VLM 24,31,384,1\n" /* vlm %v24,%v31,384(%r1) */ 168 " j 7f\n" 169 "5: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */ 170 "6: VLM 16,31,256,1\n" /* vlm %v16,%v31,256(%r1) */ 171 "7:" 172 : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs) 173 : [m] "d" (flags) 174 : "1", "cc"); 175 } 176 EXPORT_SYMBOL(__kernel_fpu_end); 177 178 void __load_fpu_regs(void) 179 { 180 struct fpu *state = ¤t->thread.fpu; 181 unsigned long *regs = current->thread.fpu.regs; 182 183 asm volatile("lfpc %0" : : "Q" (state->fpc)); 184 if (likely(MACHINE_HAS_VX)) { 185 asm volatile("lgr 1,%0\n" 186 "VLM 0,15,0,1\n" 187 "VLM 16,31,256,1\n" 188 : 189 : "d" (regs) 190 : "1", "cc", "memory"); 191 } else { 192 asm volatile("ld 0,%0" : : "Q" (regs[0])); 193 asm volatile("ld 1,%0" : : "Q" (regs[1])); 194 asm volatile("ld 2,%0" : : "Q" (regs[2])); 195 asm volatile("ld 3,%0" : : "Q" (regs[3])); 196 asm volatile("ld 4,%0" : : "Q" (regs[4])); 197 asm volatile("ld 5,%0" : : "Q" (regs[5])); 198 asm volatile("ld 6,%0" : : "Q" (regs[6])); 199 asm volatile("ld 7,%0" : : "Q" (regs[7])); 200 asm volatile("ld 8,%0" : : "Q" (regs[8])); 201 asm volatile("ld 9,%0" : : "Q" (regs[9])); 202 asm volatile("ld 10,%0" : : "Q" (regs[10])); 203 asm volatile("ld 11,%0" : : "Q" (regs[11])); 204 asm volatile("ld 12,%0" : : "Q" (regs[12])); 205 asm volatile("ld 13,%0" : : "Q" (regs[13])); 206 asm volatile("ld 14,%0" : : "Q" (regs[14])); 207 asm volatile("ld 15,%0" : : "Q" (regs[15])); 208 } 209 clear_cpu_flag(CIF_FPU); 210 } 211 EXPORT_SYMBOL(__load_fpu_regs); 212 213 void load_fpu_regs(void) 214 { 215 raw_local_irq_disable(); 216 __load_fpu_regs(); 217 raw_local_irq_enable(); 218 } 219 EXPORT_SYMBOL(load_fpu_regs); 220 221 void save_fpu_regs(void) 222 { 223 unsigned long flags, *regs; 224 struct fpu *state; 225 226 local_irq_save(flags); 227 228 if (test_cpu_flag(CIF_FPU)) 229 goto out; 230 231 state = ¤t->thread.fpu; 232 regs = current->thread.fpu.regs; 233 234 asm volatile("stfpc %0" : "=Q" (state->fpc)); 235 if (likely(MACHINE_HAS_VX)) { 236 asm volatile("lgr 1,%0\n" 237 "VSTM 0,15,0,1\n" 238 "VSTM 16,31,256,1\n" 239 : 240 : "d" (regs) 241 : "1", "cc", "memory"); 242 } else { 243 asm volatile("std 0,%0" : "=Q" (regs[0])); 244 asm volatile("std 1,%0" : "=Q" (regs[1])); 245 asm volatile("std 2,%0" : "=Q" (regs[2])); 246 asm volatile("std 3,%0" : "=Q" (regs[3])); 247 asm volatile("std 4,%0" : "=Q" (regs[4])); 248 asm volatile("std 5,%0" : "=Q" (regs[5])); 249 asm volatile("std 6,%0" : "=Q" (regs[6])); 250 asm volatile("std 7,%0" : "=Q" (regs[7])); 251 asm volatile("std 8,%0" : "=Q" (regs[8])); 252 asm volatile("std 9,%0" : "=Q" (regs[9])); 253 asm 
volatile("std 10,%0" : "=Q" (regs[10])); 254 asm volatile("std 11,%0" : "=Q" (regs[11])); 255 asm volatile("std 12,%0" : "=Q" (regs[12])); 256 asm volatile("std 13,%0" : "=Q" (regs[13])); 257 asm volatile("std 14,%0" : "=Q" (regs[14])); 258 asm volatile("std 15,%0" : "=Q" (regs[15])); 259 } 260 set_cpu_flag(CIF_FPU); 261 out: 262 local_irq_restore(flags); 263 } 264 EXPORT_SYMBOL(save_fpu_regs); 265