/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
#include <linux/errno.h>

#ifdef CONFIG_PPC_FPU

/* Minimum stack frame plus a 16-byte slot (at STKFRM-16) for saving a vector reg. */
#define STKFRM	(PPC_MIN_STKFRM + 16)

/* Get the contents of frN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	/* Each entry in the table below is 8 bytes; index it by N * 8. */
	rlwinm	r3,r3,3,0xf8
	/* bcl sets LR to the start of the table, then branches to 1f. */
	bcl	20,31,1f
reg = 0
	.rept	32
	stfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5	/* address of table entry N */
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into frN; N is in r3 and p is in r4. */
_GLOBAL(put_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	stvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into vrN; N is in r3 and p is in r4. */
_GLOBAL(put_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsN into vs0; N is in r3. */
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8	/* 64 VSX regs, 8 bytes per table entry */
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(0,reg,reg)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(reg,0,0)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(load_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	beq	cr7,1f		/* no need to save vs0 if it is the target */
	STXVD2X(0,R1,R8)	/* save caller's vs0 */
1:	LXVD2X(0,R0,R4)
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	beq	cr7,4f
	bl	put_vsr
	LXVD2X(0,R1,R8)		/* restore caller's vs0 */
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	addi	r1,r1,STKFRM
	blr

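/*
 * Note on load_vsrn/store_vsrn: lxvd2x and stxvd2x access the two
 * doublewords in big-endian element order, so on little-endian kernels
 * XXSWAPD is used to restore the expected layout.  Both routines stage
 * the value through vs0 (via put_vsr/get_vsr), saving the caller's vs0
 * in the STKFRM-16 stack slot and restoring it afterwards, except when
 * vs0 itself is the register being loaded.
 */
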
/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(store_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	STXVD2X(0,R1,R8)	/* save caller's vs0 */
	bl	get_vsr
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	STXVD2X(0,R0,R4)
	LXVD2X(0,R1,R8)		/* restore caller's vs0 */
	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
#endif /* CONFIG_VSX */

/* Convert single-precision to double, without disturbing FPRs. */
/* conv_sp_to_dp(float *sp, double *dp) */
_GLOBAL(conv_sp_to_dp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfs	fr0, 0(r3)	/* lfs widens the value to double precision */
	stfd	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr

/* Convert double-precision to single, without disturbing FPRs. */
/* conv_dp_to_sp(double *dp, float *sp) */
_GLOBAL(conv_dp_to_sp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfd	fr0, 0(r3)
	stfs	fr0, 0(r4)	/* stfs rounds the value to single precision */
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr

#endif /* CONFIG_PPC_FPU */
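
/*
 * The conversion helpers above follow the usual C calling convention
 * (first argument in r3, second in r4), so they can be declared and
 * called from C directly.  A minimal sketch of such declarations (the
 * const qualifiers are illustrative assumptions; the authoritative
 * prototypes live with the callers in the instruction emulation code):
 *
 *	extern void conv_sp_to_dp(const float *sp, double *dp);
 *	extern void conv_dp_to_sp(const double *dp, float *sp);
 */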