#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr

_GLOBAL(giveup_altivec_notask)
	mfmsr	r3
	andis.	r4,r3,MSR_VEC@h
	bnelr				/* Already enabled? */
	oris	r3,r3,MSR_VEC@h
	SYNC
	MTMSRD(r3)			/* enable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
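/*
 * Illustrative only: the comment in load_up_altivec above notes that the
 * context-switch path calls giveup_altivec rather than doing lazy VMX
 * switching on SMP.  A minimal sketch of such a caller (in C, not part of
 * this file; the prev/next naming is an assumption here, not copied from
 * switch_to):
 *
 *	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
 *		giveup_altivec(prev);	// save VRs to prev->thread, clear MSR_VEC
 */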
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
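/*
 * Illustrative only: each routine below works on arrays of four 32-bit
 * floats (r3 = destination, r4 and, where used, r5/r6 = sources) and
 * clobbers the FPU, hence the preemption rule above.  A hypothetical C
 * caller (the prototype is an assumption for illustration, not taken
 * from a header):
 *
 *	extern void vaddfp(void *dst, const void *a, const void *b);
 *
 *	preempt_disable();
 *	vaddfp(dst, a, b);	// dst[i] = a[i] + b[i] for i = 0..3
 *	preempt_enable();
 */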
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31			/* save caller's FPSCR in fr31 */
	MTFSF_L(fr1)			/* clear FPSCR for the emulation */
	blr

fpdisable:
	mtlr	r12			/* r12 holds the caller's saved LR */
	MTFSF_L(fr31)			/* restore the saved FPSCR */
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10			/* restore the MSR saved in fpenable */
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
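/*
 * Sketch of the refinement step used below (not from the original
 * source; it just restates the per-instruction comments as algebra):
 * to improve r ~= 1/sqrt(s), apply Newton-Raphson to f(r) = 1/r^2 - s,
 * which gives
 *
 *	r' = r - f(r)/f'(r) = r * (3 - s*r*r)/2 = r + 0.5*r*(1 - s*r*r)
 *
 * i.e. exactly the fmuls/fnmsubs/fmadds sequence in the loop, applied
 * twice per element to roughly double the number of accurate bits on
 * each pass.
 */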
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable