/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_REG_CALL_STACK              TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN          16
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6",
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,

    /* Vector registers and TCG_REG_V0 reserved for mask. */
    TCG_REG_V1,  TCG_REG_V2,  TCG_REG_V3,  TCG_REG_V4,
    TCG_REG_V5,  TCG_REG_V6,  TCG_REG_V7,  TCG_REG_V8,
    TCG_REG_V9,  TCG_REG_V10, TCG_REG_V11, TCG_REG_V12,
    TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16,
    TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20,
    TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24,
    TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28,
    TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_S12    0x100
#define TCG_CT_CONST_M12    0x200
#define TCG_CT_CONST_S5     0x400
#define TCG_CT_CONST_CMP_VI 0x800

#define ALL_GENERAL_REGS       MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS        MAKE_64BIT_MASK(32, 32)
#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000
#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000

#define sextreg  sextract64

/*
 * RISC-V Base ISA opcodes (IM)
 */

#define V_OPIVV (0x0 << 12)
#define V_OPFVV (0x1 << 12)
#define V_OPMVV (0x2 << 12)
#define V_OPIVI (0x3 << 12)
#define V_OPIVX (0x4 << 12)
#define V_OPFVF (0x5 << 12)
#define V_OPMVX (0x6 << 12)
#define V_OPCFG (0x7 << 12)

/* NF <= 7 && NF >= 0 */
#define V_NF(x) (x << 29)
#define V_UNIT_STRIDE (0x0 << 20)
#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20)

typedef enum {
    VLMUL_M1 = 0, /* LMUL=1 */
    VLMUL_M2,     /* LMUL=2 */
    VLMUL_M4,     /* LMUL=4 */
    VLMUL_M8,     /* LMUL=8 */
    VLMUL_RESERVED,
    VLMUL_MF8,    /* LMUL=1/8 */
    VLMUL_MF4,    /* LMUL=1/4 */
    VLMUL_MF2,    /* LMUL=1/2 */
} RISCVVlmul;

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BEXTI = 0x48005013,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,

    OPC_FENCE = 0x0000000f,
    OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
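
    /*
     * Each opcode above and below is a complete 32-bit instruction word
     * with its register and immediate fields zeroed; the encode_* helpers
     * further down OR the operand fields into place.
     */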
    /* Zba: Bit manipulation extension, address generation */
    OPC_ADD_UW = 0x0800003b,

    /* Zbb: Bit manipulation extension, basic bit manipulation */
    OPC_ANDN = 0x40007033,
    OPC_CLZ = 0x60001013,
    OPC_CLZW = 0x6000101b,
    OPC_CPOP = 0x60201013,
    OPC_CPOPW = 0x6020101b,
    OPC_CTZ = 0x60101013,
    OPC_CTZW = 0x6010101b,
    OPC_ORN = 0x40006033,
    OPC_REV8 = 0x6b805013,
    OPC_ROL = 0x60001033,
    OPC_ROLW = 0x6000103b,
    OPC_ROR = 0x60005033,
    OPC_RORW = 0x6000503b,
    OPC_RORI = 0x60005013,
    OPC_RORIW = 0x6000501b,
    OPC_SEXT_B = 0x60401013,
    OPC_SEXT_H = 0x60501013,
    OPC_XNOR = 0x40004033,
    OPC_ZEXT_H = 0x0800403b,

    /* Zicond: integer conditional operations */
    OPC_CZERO_EQZ = 0x0e005033,
    OPC_CZERO_NEZ = 0x0e007033,

    /* V: Vector extension 1.0 */
    OPC_VSETVLI = 0x57 | V_OPCFG,
    OPC_VSETIVLI = 0xc0000057 | V_OPCFG,
    OPC_VSETVL = 0x80000057 | V_OPCFG,

    OPC_VLE8_V = 0x7 | V_UNIT_STRIDE,
    OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE,
    OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE,
    OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE,
    OPC_VSE8_V = 0x27 | V_UNIT_STRIDE,
    OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE,
    OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE,
    OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE,

    OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
    OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
    OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
    OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),

    OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
    OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
    OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
    OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),

    OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
    OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,

    OPC_VADD_VV = 0x57 | V_OPIVV,
    OPC_VADD_VI = 0x57 | V_OPIVI,
    OPC_VSUB_VV = 0x8000057 | V_OPIVV,
    OPC_VRSUB_VI = 0xc000057 | V_OPIVI,
    OPC_VAND_VV = 0x24000057 | V_OPIVV,
    OPC_VAND_VI = 0x24000057 | V_OPIVI,
    OPC_VOR_VV = 0x28000057 | V_OPIVV,
    OPC_VOR_VI = 0x28000057 | V_OPIVI,
    OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
    OPC_VXOR_VI = 0x2c000057 | V_OPIVI,

    OPC_VMUL_VV = 0x94000057 | V_OPMVV,
    OPC_VSADD_VV = 0x84000057 | V_OPIVV,
    OPC_VSADD_VI = 0x84000057 | V_OPIVI,
    OPC_VSSUB_VV = 0x8c000057 | V_OPIVV,
    OPC_VSSUB_VI = 0x8c000057 | V_OPIVI,
    OPC_VSADDU_VV = 0x80000057 | V_OPIVV,
    OPC_VSADDU_VI = 0x80000057 | V_OPIVI,
    OPC_VSSUBU_VV = 0x88000057 | V_OPIVV,
    OPC_VSSUBU_VI = 0x88000057 | V_OPIVI,

    OPC_VMAX_VV = 0x1c000057 | V_OPIVV,
    OPC_VMAX_VI = 0x1c000057 | V_OPIVI,
    OPC_VMAXU_VV = 0x18000057 | V_OPIVV,
    OPC_VMAXU_VI = 0x18000057 | V_OPIVI,
    OPC_VMIN_VV = 0x14000057 | V_OPIVV,
    OPC_VMIN_VI = 0x14000057 | V_OPIVI,
    OPC_VMINU_VV = 0x10000057 | V_OPIVV,
    OPC_VMINU_VI = 0x10000057 | V_OPIVI,

    OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
    OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
    OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
    OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
    OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
    OPC_VMSNE_VX = 0x64000057 | V_OPIVX,

    OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
    OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
    OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
    OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
    OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
    OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
    OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
    OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
    OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
    OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
    OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
    OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
    OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
    OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,

    OPC_VSLL_VV = 0x94000057 | V_OPIVV,
    OPC_VSLL_VI = 0x94000057 | V_OPIVI,
    OPC_VSLL_VX = 0x94000057 | V_OPIVX,
    OPC_VSRL_VV = 0xa0000057 | V_OPIVV,
    OPC_VSRL_VI = 0xa0000057 | V_OPIVI,
    OPC_VSRL_VX = 0xa0000057 | V_OPIVX,
    OPC_VSRA_VV = 0xa4000057 | V_OPIVV,
    OPC_VSRA_VI = 0xa4000057 | V_OPIVI,
    OPC_VSRA_VX = 0xa4000057 | V_OPIVX,

    OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
    OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
    OPC_VMV_V_X = 0x5e000057 | V_OPIVX,

    OPC_VMVNR_V = 0x9e000057 | V_OPIVI,
} RISCVInsn;

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_cmpcond_to_rvv_vv[] = {
    [TCG_COND_EQ]  = { OPC_VMSEQ_VV,  false },
    [TCG_COND_NE]  = { OPC_VMSNE_VV,  false },
    [TCG_COND_LT]  = { OPC_VMSLT_VV,  false },
    [TCG_COND_GE]  = { OPC_VMSLE_VV,  true },
    [TCG_COND_GT]  = { OPC_VMSLT_VV,  true },
    [TCG_COND_LE]  = { OPC_VMSLE_VV,  false },
    [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
    [TCG_COND_GEU] = { OPC_VMSLEU_VV, true },
    [TCG_COND_GTU] = { OPC_VMSLTU_VV, true },
    [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
};

/*
 * Immediate comparisons: conditions lacking a native _VI insn are mapped
 * to a neighbouring one by biasing the immediate (x < c <=> x <= c - 1),
 * hence the shifted min/max bounds and the adjust flag.
 */
static const struct {
    RISCVInsn op;
    int min;
    int max;
    bool adjust;
} tcg_cmpcond_to_rvv_vi[] = {
    [TCG_COND_EQ]  = { OPC_VMSEQ_VI,  -16, 15, false },
    [TCG_COND_NE]  = { OPC_VMSNE_VI,  -16, 15, false },
    [TCG_COND_GT]  = { OPC_VMSGT_VI,  -16, 15, false },
    [TCG_COND_LE]  = { OPC_VMSLE_VI,  -16, 15, false },
    [TCG_COND_LT]  = { OPC_VMSLE_VI,  -15, 16, true },
    [TCG_COND_GE]  = { OPC_VMSGT_VI,  -15, 16, true },
    [TCG_COND_LEU] = { OPC_VMSLEU_VI,   0, 15, false },
    [TCG_COND_GTU] = { OPC_VMSGTU_VI,   0, 15, false },
    [TCG_COND_LTU] = { OPC_VMSLEU_VI,   1, 16, true },
    [TCG_COND_GEU] = { OPC_VMSGTU_VI,   1, 16, true },
};

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type >= TCG_TYPE_V64) {
        /* Val is replicated by VECE; extract the highest element. */
        val >>= (-8 << vece) & 63;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the isa field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by movcond, which may need the negative value,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 5 bits: [-0x10, 0x0f].
     * Used for vector-immediate.
     */
    if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
        return 1;
    }
    /*
     * Used for vector compare OPIVI instructions.
     */
    if ((ct & TCG_CT_CONST_CMP_VI) &&
        val >= tcg_cmpcond_to_rvv_vi[cond].min &&
        val <= tcg_cmpcond_to_rvv_vi[cond].max) {
        return true;
    }
    return 0;
}

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}


/* Type-OPIVI */

static int32_t encode_vi(RISCVInsn opc, TCGReg rd, int32_t imm,
                         TCGReg vs2, bool vm)
{
    return opc | (rd & 0x1f) << 7 | (imm & 0x1f) << 15 |
           (vs2 & 0x1f) << 20 | (vm << 25);
}

/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */

static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1,
                        TCGReg s2, bool vm)
{
    return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 |
           (s2 & 0x1f) << 20 | (vm << 25);
}

/* Vector vtype */

static uint32_t encode_vtype(bool vta, bool vma,
                             MemOp vsew, RISCVVlmul vlmul)
{
    return vma << 7 | vta << 6 | vsew << 3 | vlmul;
}

static int32_t encode_vset(RISCVInsn opc, TCGReg rd,
                           TCGArg rs1, uint32_t vtype)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20;
}

static int32_t encode_vseti(RISCVInsn opc, TCGReg rd,
                            uint32_t uimm, uint32_t vtype)
{
    return opc | (rd & 0x1f) << 7 | (uimm & 0x1f) << 15 | (vtype & 0x3ff) << 20;
}
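
/*
 * Worked example, for illustration: encode_i(OPC_ADDI, TCG_REG_A0,
 * TCG_REG_A1, 1) assembles "addi a0, a1, 1" as
 * 0x13 | (10 << 7) | (11 << 15) | (1 << 20) = 0x00158513.
 */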
/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * RISC-V vector instruction emitters
 */

/*
 * Vector registers use the same 5 lower bits as GPR registers,
 * and vm=0 (vm = false) means vector masking ENABLED.
 * With RVV 1.0, vs2 is the first operand, while rs1/imm is the
 * second operand.
 */
static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc,
                           TCGReg vd, TCGReg vs2, TCGReg vs1)
{
    tcg_out32(s, encode_v(opc, vd, vs1, vs2, true));
}

static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc,
                           TCGReg vd, TCGReg vs2, TCGReg rs1)
{
    tcg_out32(s, encode_v(opc, vd, rs1, vs2, true));
}

static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc,
                           TCGReg vd, TCGReg vs2, int32_t imm)
{
    tcg_out32(s, encode_vi(opc, vd, imm, vs2, true));
}

static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi,
                              TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1)
{
    if (c_vi1) {
        tcg_out_opc_vi(s, o_vi, vd, vs2, vi1);
    } else {
        tcg_out_opc_vv(s, o_vv, vd, vs2, vi1);
    }
}

static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
                                 TCGReg vs2, int32_t imm)
{
    tcg_out32(s, encode_vi(opc, vd, imm, vs2, false));
}

static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
                                 TCGReg vs2, TCGReg vs1)
{
    tcg_out32(s, encode_v(opc, vd, vs1, vs2, false));
}

typedef struct VsetCache {
    uint32_t movi_insn;
    uint32_t vset_insn;
} VsetCache;

static VsetCache riscv_vset_cache[3][4];

static void set_vtype(TCGContext *s, TCGType type, MemOp vsew)
{
    const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];

    s->riscv_cur_type = type;
    s->riscv_cur_vsew = vsew;

    if (p->movi_insn) {
        tcg_out32(s, p->movi_insn);
    }
    tcg_out32(s, p->vset_insn);
}

static MemOp set_vtype_len(TCGContext *s, TCGType type)
{
    if (type != s->riscv_cur_type) {
        set_vtype(s, type, MO_64);
    }
    return s->riscv_cur_vsew;
}

static void set_vtype_len_sew(TCGContext *s, TCGType type, MemOp vsew)
{
    if (type != s->riscv_cur_type || vsew != s->riscv_cur_vsew) {
        set_vtype(s, type, vsew);
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        {
            int lmul = type - riscv_lg2_vlenb;
            int nf = 1 << MAX(lmul, 0);
            tcg_out_opc_vi(s, OPC_VMVNR_V, ret, arg, nf - 1);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }
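
    /*
     * Beyond this point the constant is synthesized from LUI/ADDI pieces
     * plus a shift.  For illustration: 0x7fff0000000 has 28 trailing zeros
     * and a 15-bit payload, so the single-20-bit-section case below emits
     * LUI rd, 0x7fff000 followed by SLLI rd, rd, 16.
     */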
    /* Look for a single 20-bit section. */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBB) {
        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
    }
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBA) {
        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
    }
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBB) {
        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
    }
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBB) {
        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
    }
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                             TCGReg addr, intptr_t offset)
{
    tcg_debug_assert(data >= TCG_REG_V0);
    tcg_debug_assert(addr < TCG_REG_V0);

    if (offset) {
        tcg_debug_assert(addr != TCG_REG_ZERO);
        if (offset == sextreg(offset, 0, 12)) {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr);
        }
        addr = TCG_REG_TMP0;
    }
    tcg_out32(s, encode_v(opc, data, addr, 0, true));
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn;

    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
        break;
    case TCG_TYPE_I64:
        tcg_out_ldst(s, OPC_LD, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        if (type >= riscv_lg2_vlenb) {
            static const RISCVInsn whole_reg_ld[] = {
                OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V
            };
            unsigned idx = type - riscv_lg2_vlenb;

            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld));
            insn = whole_reg_ld[idx];
        } else {
            static const RISCVInsn unit_stride_ld[] = {
                OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V
            };
            MemOp prev_vsew = set_vtype_len(s, type);

            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld));
            insn = unit_stride_ld[prev_vsew];
        }
        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn;

    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
        break;
    case TCG_TYPE_I64:
        tcg_out_ldst(s, OPC_SD, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        if (type >= riscv_lg2_vlenb) {
            static const RISCVInsn whole_reg_st[] = {
                OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V
            };
            unsigned idx = type - riscv_lg2_vlenb;

            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st));
            insn = whole_reg_st[idx];
        } else {
            static const RISCVInsn unit_stride_st[] = {
                OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V
            };
            MemOp prev_vsew = set_vtype_len(s, type);

            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st));
            insn = unit_stride_st[prev_vsew];
        }
        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}
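
/*
 * Scalar-to-vector broadcast: vmv.v.x replicates a GPR into every
 * element under the current vtype; tcg_out_dupi_vec below uses
 * vmv.v.i directly when the value fits in a signed 5-bit immediate.
 */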
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    set_vtype_len_sew(s, type, vece);
    tcg_out_opc_vx(s, OPC_VMV_V_X, dst, 0, src);
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset)
{
    tcg_out_ld(s, TCG_TYPE_REG, TCG_REG_TMP0, base, offset);
    return tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg)
{
    /* Arg is replicated by VECE; extract the highest element. */
    arg >>= (-8 << vece) & 63;

    if (arg >= -16 && arg < 16) {
        if (arg == 0 || arg == -1) {
            set_vtype_len(s, type);
        } else {
            set_vtype_len_sew(s, type, vece);
        }
        tcg_out_opc_vi(s, OPC_VMV_V_I, dst, 0, arg);
        return;
    }
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, arg);
    tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
}

static void tcg_out_br(TCGContext *s, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, l, 0);
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
}

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BLT,  false },
    [TCG_COND_GE]  = { OPC_BGE,  false },
    [TCG_COND_LE]  = { OPC_BGE,  true },
    [TCG_COND_GT]  = { OPC_BLT,  true },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true },
    [TCG_COND_GTU] = { OPC_BLTU, true }
};

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, rz),
    .out_rr = tgen_brcond,
};

/*
 * The setcond_int helper returns the result register together with flag
 * bits: SETCOND_INV means the boolean sense is inverted, SETCOND_NEZ
 * means the value is merely zero/non-zero and still needs a != 0 test.
 */
#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT. Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is constrained to signed 12-bit, and 0x800 is representable in the
         * temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            tcg_debug_assert(arg2 <= 0x7ff);
            if (++arg2 == 0x800) {
                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
                arg2 = TCG_REG_TMP0;
                c2 = false;
            }
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else {
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
        }
        break;

    case TCG_COND_LT:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        }
        break;

    case TCG_COND_LTU:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, cond, dest, arg1, arg2, false);
}

static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
                          TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, cond, dest, arg1, arg2, true);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_setcond,
    .out_rri = tgen_setcondi,
};

static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags;
    TCGReg tmp;

    /* For LT/GE comparison against 0, replicate the sign bit. */
    if (c2 && arg2 == 0) {
        switch (cond) {
        case TCG_COND_GE:
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1);
            arg1 = ret;
            /* fall through */
        case TCG_COND_LT:
            tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1);
            return;
        default:
            break;
        }
    }

    tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
    tmp = tmpflags & ~SETCOND_FLAGS;

    /* If intermediate result is zero/non-zero: test != 0. */
    if (tmpflags & SETCOND_NEZ) {
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
        tmp = ret;
    }

    /* Produce the 0/-1 result. */
    if (tmpflags & SETCOND_INV) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1);
    } else {
        tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp);
    }
}

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_negsetcond(s, cond, dest, arg1, arg2, false);
}

static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
                             TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_negsetcond(s, cond, dest, arg1, arg2, true);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_negsetcond,
    .out_rri = tgen_negsetcondi,
};

static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
                                   int val1, bool c_val1,
                                   int val2, bool c_val2)
{
    if (val1 == 0) {
        if (c_val2) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
            val2 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
        return;
    }

    if (val2 == 0) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
            val1 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
        return;
    }

    if (c_val2) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
        } else {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
        return;
    }

    if (c_val1) {
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
        return;
    }

    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
}

static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val, bool c_val)
{
    RISCVInsn op;
    int disp = 8;

    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
    op = tcg_brcond_to_riscv[cond].op;
    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
    } else {
        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
    }
    if (c_val) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
    } else {
        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
    }
}
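
/*
 * br1 branches forward by 8 bytes when the condition holds, i.e. over
 * exactly the one move instruction that follows, which yields
 * "ret = cond ? ret : val"; br2 below composes the two-value form.
 */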
static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val1, bool c_val1,
                                int val2, bool c_val2)
{
    TCGReg tmp;

    /* TCG optimizer reorders to prefer ret matching val2. */
    if (!c_val2 && ret == val2) {
        cond = tcg_invert_cond(cond);
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
        return;
    }

    if (!c_val1 && ret == val1) {
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
        return;
    }

    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
    if (c_val1) {
        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
    }
    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
}

static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg ret, TCGReg cmp1, TCGArg cmp2, bool c_cmp2,
                            TCGArg val1, bool c_val1,
                            TCGArg val2, bool c_val2)
{
    int tmpflags;
    TCGReg t;

    if (!(cpuinfo & CPUINFO_ZICOND) && (!c_cmp2 || cmp2 == 0)) {
        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
                            val1, c_val1, val2, c_val2);
        return;
    }

    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
    t = tmpflags & ~SETCOND_FLAGS;

    if (cpuinfo & CPUINFO_ZICOND) {
        if (tmpflags & SETCOND_INV) {
            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
        } else {
            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
        }
    } else {
        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
                            val1, c_val1, val2, c_val2);
    }
}

static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, rI, rM, rM),
    .out = tcg_out_movcond,
};

static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
{
    tcg_out_opc_imm(s, insn, ret, src1, 0);

    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
        /*
         * The requested zero result does not match the insn, so adjust.
         * Note that constraints put 'ret' in a new register, so the
         * computation above did not clobber either 'src1' or 'src2'.
         */
        tcg_out_movcond(s, type, TCG_COND_EQ, ret, src1, 0, true,
                        src2, c_src2, ret, false);
    }
}

static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece,
                           TCGCond cond, TCGReg ret,
                           TCGReg cmp1, TCGReg cmp2, bool c_cmp2,
                           TCGReg val1, bool c_val1,
                           TCGReg val2, bool c_val2)
{
    set_vtype_len_sew(s, type, vece);

    /* Use only vmerge_vim if possible, by inverting the test. */
    if (c_val2 && !c_val1) {
        TCGArg temp = val1;
        cond = tcg_invert_cond(cond);
        val1 = val2;
        val2 = temp;
        c_val1 = true;
        c_val2 = false;
    }

    /* Perform the comparison into V0 mask. */
    if (c_cmp2) {
        tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, TCG_REG_V0, cmp1,
                       cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
    } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
                       TCG_REG_V0, cmp2, cmp1);
    } else {
        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
                       TCG_REG_V0, cmp1, cmp2);
    }
    if (c_val1) {
        if (c_val2) {
            tcg_out_opc_vi(s, OPC_VMV_V_I, ret, 0, val2);
            val2 = ret;
        }
        /* vd[i] == v0.mask[i] ? imm : vs2[i] */
        tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1);
    } else {
        /* vd[i] == v0.mask[i] ? vs1[i] : vs2[i] */
        tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1);
    }
}

static void tcg_out_vshifti(TCGContext *s, RISCVInsn opc_vi, RISCVInsn opc_vx,
                            TCGReg dst, TCGReg src, unsigned imm)
{
    if (imm < 32) {
        tcg_out_opc_vi(s, opc_vi, dst, src, imm);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm);
        tcg_out_opc_vx(s, opc_vx, dst, src, TCG_REG_TMP0);
    }
}

static void init_setting_vtype(TCGContext *s)
{
    s->riscv_cur_type = TCG_TYPE_COUNT;
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    init_setting_vtype(s);

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (offset == (int32_t)offset) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * The FENCE predecessor set occupies bits [27:24] and the successor set
 * bits [23:20], with R = 2 and W = 1 within each set; e.g. 0x02200000
 * encodes "fence r, r".
 */
static void tcg_out_mb(TCGContext *s, unsigned a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}

/*
 * Load/store and TLB
 */

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We have three temps, we might as well expose them. */
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    TCGAtomAlign aa;
    unsigned a_mask;

    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1u << aa.align) - 1;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        unsigned s_mask = (1u << s_bits) - 1;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
        int compare_mask;
        TCGReg addr_adj;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;

        init_setting_vtype(s);

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                        s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address. For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        addr_adj = addr_reg;
        if (a_mask < s_mask) {
            addr_adj = TCG_REG_TMP0;
            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
                            addr_adj, addr_reg, s_mask - a_mask);
        }
        compare_mask = s->page_mask | a_mask;
        if (compare_mask == sextreg(compare_mask, 0, 12)) {
            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
        } else {
            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
        }

        /* Load the tlb comparator and the addend. */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /* Compare masked address with the TLB entry. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        /* TLB Hit - translate address using addend. */
        if (addr_type != TCG_TYPE_I32) {
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
        } else if (cpuinfo & CPUINFO_ZBA) {
            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
                            addr_reg, TCG_REG_TMP2);
        } else {
            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
                            TCG_REG_TMP0, TCG_REG_TMP2);
        }
        *pbase = TCG_REG_TMP0;
    } else {
        TCGReg base;

        if (a_mask) {
            ldst = new_ldst_label(s);
            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addr_reg = addr_reg;

            init_setting_vtype(s);

            /* We are expecting alignment max 7, so we can always use andi. */
            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        if (guest_base != 0) {
            base = TCG_REG_TMP0;
            if (addr_type != TCG_TYPE_I32) {
                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
                                TCG_GUEST_BASE_REG);
            } else if (cpuinfo & CPUINFO_ZBA) {
                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
                                TCG_GUEST_BASE_REG);
            } else {
                tcg_out_ext32u(s, base, addr_reg);
                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
            }
        } else if (addr_type != TCG_TYPE_I32) {
            base = addr_reg;
        } else {
            base = TCG_REG_TMP0;
            tcg_out_ext32u(s, base, addr_reg);
        }
        *pbase = base;
    }

    return ldst;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
        break;
    case MO_UQ:
        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, val, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, val, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, val, 0);
        break;
    case MO_64:
        tcg_out_opc_store(s, OPC_SD, base, val, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
{
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}


static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_addi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
    tcg_out_opc_imm(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_add,
    .out_rri = tgen_addi,
};

static const TCGOutOpBinary outop_addco = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpAddSubCarry outop_addci = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_addcio = {
    .base.static_constraint = C_NotImplemented,
};

static void tcg_out_set_carry(TCGContext *s)
{
    g_assert_not_reached();
}

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
}

static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
}

static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags)
{
    return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented;
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_zbb_rrr,
    .out_rrr = tgen_andc,
};

static void tgen_clz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
    tcg_out_cltz(s, type, insn, a0, a1, a2, false);
}

static void tgen_clzi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
    tcg_out_cltz(s, type, insn, a0, a1, a2, true);
}
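
/*
 * clz/ctz request a fresh output register (C_N1_I2) so that the movcond
 * fixup in tcg_out_cltz can still read both source operands.
 */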
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_addi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
    tcg_out_opc_imm(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_add,
    .out_rri = tgen_addi,
};

static const TCGOutOpBinary outop_addco = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpAddSubCarry outop_addci = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_addcio = {
    .base.static_constraint = C_NotImplemented,
};

static void tcg_out_set_carry(TCGContext *s)
{
    g_assert_not_reached();
}

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
}

static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
}

static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags)
{
    return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented;
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_zbb_rrr,
    .out_rrr = tgen_andc,
};

static void tgen_clz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
    tcg_out_cltz(s, type, insn, a0, a1, a2, false);
}

static void tgen_clzi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
    tcg_out_cltz(s, type, insn, a0, a1, a2, true);
}

static TCGConstraintSetIndex cset_clzctz(TCGType type, unsigned flags)
{
    return cpuinfo & CPUINFO_ZBB ? C_N1_I2(r, r, rM) : C_NotImplemented;
}

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_clzctz,
    .out_rrr = tgen_clz,
    .out_rri = tgen_clzi,
};
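/*
 * Note for clz/ctz: TCG supplies a second operand giving the result for a
 * zero input.  Zbb's CLZ/CTZ already return the operand width in that case,
 * so tcg_out_cltz needs extra code only when the requested fallback differs;
 * the C_N1_I2 constraint keeps the output register distinct from the inputs
 * for that conditional sequence.
 */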
static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CPOPW : OPC_CPOP;
    tcg_out_opc_imm(s, insn, a0, a1, 0);
}

static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};

static void tgen_ctz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
    tcg_out_cltz(s, type, insn, a0, a1, a2, false);
}

static void tgen_ctzi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
    tcg_out_cltz(s, type, insn, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_clzctz,
    .out_rrr = tgen_ctz,
    .out_rri = tgen_ctzi,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVW : OPC_DIV;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVUW : OPC_DIVU;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_zbb_rrr,
    .out_rrr = tgen_eqv,
};

static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
{
    tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
}

static const TCGOutOpUnary outop_extrh_i64_i32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extrh_i64_i32,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_MULW : OPC_MUL;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_NotImplemented,
};

static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r);
}

static void tgen_mulsh(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
}

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mulh,
    .out_rrr = tgen_mulsh,
};

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_muluh(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
}

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mulh,
    .out_rrr = tgen_muluh,
};
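/*
 * MULH/MULHU produce only the high half of a 64x64->128 product, hence the
 * TCG_TYPE_I64 restriction in cset_mulh above; 32-bit high-part products
 * are presumably synthesized by the middle-end from a full 64-bit MUL.
 */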
static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
}

static void tgen_ori(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_or,
    .out_rri = tgen_ori,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_zbb_rrr,
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMW : OPC_REM;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMUW : OPC_REMU;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

static TCGConstraintSetIndex cset_rot(TCGType type, unsigned flags)
{
    return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, ri) : C_NotImplemented;
}

static void tgen_rotr(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORW : OPC_ROR;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_rotri(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORIW : OPC_RORI;
    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_rot,
    .out_rrr = tgen_rotr,
    .out_rri = tgen_rotri,
};

static void tgen_rotl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ROLW : OPC_ROL;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_rotli(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    /* Zbb has no rotate-left-immediate; rotate right by the complement. */
    tgen_rotri(s, type, a0, a1, -a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_rot,
    .out_rrr = tgen_rotl,
    .out_rri = tgen_rotli,
};
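/*
 * As an example of tgen_rotli above: a 32-bit rotate left by 8 is emitted
 * as "roriw a0, a1, 24", since -8 & 31 == 24 and rotating right by
 * (width - c) is identical to rotating left by c.
 */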
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAW : OPC_SRA;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_sari(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAIW : OPC_SRAI;
    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_sar,
    .out_rri = tgen_sari,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLW : OPC_SLL;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_shli(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLIW : OPC_SLLI;
    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_shl,
    .out_rri = tgen_shli,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLW : OPC_SRL;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static void tgen_shri(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLIW : OPC_SRLI;
    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_shr,
    .out_rri = tgen_shri,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SUBW : OPC_SUB;
    tcg_out_opc_reg(s, insn, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static const TCGOutOpAddSubCarry outop_subbo = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpAddSubCarry outop_subbi = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpAddSubCarry outop_subbio = {
    .base.static_constraint = C_NotImplemented,
};

static void tcg_out_set_borrow(TCGContext *s)
{
    g_assert_not_reached();
}

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
}

static void tgen_xori(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_xor,
    .out_rri = tgen_xori,
};
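/*
 * Zbb's REV8 always reverses all eight bytes of the 64-bit register, so
 * the narrower swaps below must shift the bytes of interest back down.
 * E.g. bswap16 with zero-extension becomes (sketch):
 *
 *     rev8 a0, a1         # the two bytes land in bits [63:48]
 *     srli a0, a0, 48     # zero-extended 16-bit result
 */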
static TCGConstraintSetIndex cset_bswap(TCGType type, unsigned flags)
{
    return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
}

static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
    if (flags & TCG_BSWAP_OZ) {
        tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
    } else {
        tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
    }
}

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_bswap,
    .out_rr = tgen_bswap16,
};

static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
    if (flags & TCG_BSWAP_OZ) {
        tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
    } else {
        tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
    }
}

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_bswap,
    .out_rr = tgen_bswap32,
};

static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
}

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_bswap,
    .out_rr = tgen_bswap64,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_xori(s, type, a0, a1, -1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static const TCGOutOpDeposit outop_deposit = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                         unsigned ofs, unsigned len)
{
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_out_ext16u(s, a0, a1);
            return;
        case 32:
            tcg_out_ext32u(s, a0, a1);
            return;
        }
    }
    if (ofs + len == 32) {
        /* Extracting the most significant bits: shift right, not left. */
        tgen_shri(s, TCG_TYPE_I32, a0, a1, ofs);
        return;
    }
    if (len == 1) {
        tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, ofs);
        return;
    }
    g_assert_not_reached();
}

static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extract,
};

static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                          unsigned ofs, unsigned len)
{
    if (ofs == 0) {
        switch (len) {
        case 8:
            tcg_out_ext8s(s, type, a0, a1);
            return;
        case 16:
            tcg_out_ext16s(s, type, a0, a1);
            return;
        case 32:
            tcg_out_ext32s(s, a0, a1);
            return;
        }
    } else if (ofs + len == 32) {
        tgen_sari(s, TCG_TYPE_I32, a0, a1, ofs);
        return;
    }
    g_assert_not_reached();
}

static const TCGOutOpExtract outop_sextract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_sextract,
};

static const TCGOutOpExtract2 outop_extract2 = {
    .base.static_constraint = C_NotImplemented,
};
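/*
 * The tcg_out_ldst helper used by these wrappers is expected to handle
 * offsets that do not fit the signed 12-bit load/store immediate by first
 * forming the address in a temporary; the wrappers merely select the
 * correctly sized opcode.
 */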
static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
                      TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_LBU, dest, base, offset);
}

static const TCGOutOpLoad outop_ld8u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld8u,
};

static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
                      TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_LB, dest, base, offset);
}

static const TCGOutOpLoad outop_ld8s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld8s,
};

static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_LHU, dest, base, offset);
}

static const TCGOutOpLoad outop_ld16u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld16u,
};

static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_LH, dest, base, offset);
}

static const TCGOutOpLoad outop_ld16s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld16s,
};

static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_LWU, dest, base, offset);
}

static const TCGOutOpLoad outop_ld32u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld32u,
};

static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_LW, dest, base, offset);
}

static const TCGOutOpLoad outop_ld32s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld32s,
};

static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_SB, data, base, offset);
}

static const TCGOutOpStore outop_st8 = {
    .base.static_constraint = C_O0_I2(rz, r),
    .out_r = tgen_st8_r,
};

static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
                        TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, OPC_SH, data, base, offset);
}

static const TCGOutOpStore outop_st16 = {
    .base.static_constraint = C_O0_I2(rz, r),
    .out_r = tgen_st16_r,
};

static const TCGOutOpStore outop_st = {
    .base.static_constraint = C_O0_I2(rz, r),
    .out_r = tcg_out_st,
};


static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];

    switch (opc) {
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    default:
        g_assert_not_reached();
    }
}
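/*
 * Two vtype helpers are used in the vector cases below: the element-size
 * agnostic logical ops (and/or/xor/not) only need set_vtype_len, while
 * arithmetic, shifts and min/max depend on SEW and use set_vtype_len_sew.
 */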
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0, a1, a2;
    int c2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_add_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_sub_vec:
        set_vtype_len_sew(s, type, vece);
        if (const_args[1]) {
            tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a2, a1);
        } else {
            tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2);
        }
        break;
    case INDEX_op_and_vec:
        set_vtype_len(s, type);
        tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_or_vec:
        set_vtype_len(s, type);
        tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_xor_vec:
        set_vtype_len(s, type);
        tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_not_vec:
        set_vtype_len(s, type);
        tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1);
        break;
    case INDEX_op_neg_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0);
        break;
    case INDEX_op_mul_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv(s, OPC_VMUL_VV, a0, a1, a2);
        break;
    case INDEX_op_ssadd_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VSADD_VV, OPC_VSADD_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_sssub_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VSSUB_VV, OPC_VSSUB_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_usadd_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VSADDU_VV, OPC_VSADDU_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_ussub_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_smax_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VMAX_VV, OPC_VMAX_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_smin_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VMIN_VV, OPC_VMIN_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_umax_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VMAXU_VV, OPC_VMAXU_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_umin_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2);
        break;
    case INDEX_op_shls_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2);
        break;
    case INDEX_op_shrs_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, a2);
        break;
    case INDEX_op_sars_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vx(s, OPC_VSRA_VX, a0, a1, a2);
        break;
    case INDEX_op_shlv_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
        break;
    case INDEX_op_shrv_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
        break;
    case INDEX_op_sarv_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2);
        break;
    case INDEX_op_shli_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2);
        break;
    case INDEX_op_shri_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2);
        break;
    case INDEX_op_sari_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2);
        break;
    case INDEX_op_rotli_vec:
        /* OR together a left shift by imm and a right shift by SEW - imm. */
        set_vtype_len_sew(s, type, vece);
        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1,
                        -a2 & ((8 << vece) - 1));
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
        break;
    case INDEX_op_rotls_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
        /* Negate the scalar rotate count for the right-shift half. */
        tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2);
        tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0);
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
        break;
    case INDEX_op_rotlv_vec:
        /* Negate counts elementwise; vector shifts use the count mod SEW. */
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
        tcg_out_opc_vv(s, OPC_VSRL_VV, TCG_REG_V0, a1, TCG_REG_V0);
        tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
        break;
    case INDEX_op_rotrv_vec:
        set_vtype_len_sew(s, type, vece);
        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
        tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0);
        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
        break;
    case INDEX_op_cmp_vec:
        tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2,
                       -1, true, 0, true);
        break;
    case INDEX_op_cmpsel_vec:
        tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, c2,
                       args[3], const_args[3], args[4], const_args[4]);
        break;
    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}
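/*
 * Everything reported as supported by tcg_can_emit_vec_op below is emitted
 * directly in tcg_out_vec_op, so there is nothing for the expander to do
 * and reaching it indicates a bug.
 */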
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    g_assert_not_reached();
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotls_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_cmpsel_vec:
        return 1;
    default:
        return 0;
    }
}

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(rz, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
        return C_O1_I1(v, r);
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return C_O1_I1(v, v);
    case INDEX_op_add_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return C_O1_I2(v, v, vK);
    case INDEX_op_sub_vec:
        return C_O1_I2(v, vK, v);
    case INDEX_op_mul_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return C_O1_I2(v, v, v);
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
    case INDEX_op_rotls_vec:
        return C_O1_I2(v, v, r);
    case INDEX_op_cmp_vec:
        return C_O1_I2(v, v, vL);
    case INDEX_op_cmpsel_vec:
        return C_O1_I4(v, v, vL, vK, vK);
    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
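/*
 * As a worked example on RV64, assuming the usual core values
 * (TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128, both
 * defined elsewhere and quoted here only for illustration):
 * SAVE_SIZE = 13 * 8 = 104, TEMP_SIZE = 1024, and FRAME_SIZE rounds
 * 128 + 1024 + 104 = 1256 up to 1264, well within the 0x7ff limit.
 */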
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    init_setting_vtype(s);
}
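/*
 * Per the vector spec, writing an unsupported vtype via vsetvl sets
 * vtype.vill and forces vl to zero; the non-zero check below relies on
 * that to probe which (fractional) LMUL/SEW combinations the host accepts.
 */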
3034 */ 3035 if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) { 3036 vtype = encode_vtype(true, true, vsew, VLMUL_M1); 3037 lmul_eq_avl = false; 3038 } 3039 3040 if (avl < 32) { 3041 p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype); 3042 } else if (lmul_eq_avl) { 3043 /* rd != 0 and rs1 == 0 uses vlmax */ 3044 p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype); 3045 } else { 3046 p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl); 3047 p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype); 3048 } 3049} 3050 3051static void probe_frac_lmul(void) 3052{ 3053 /* Match riscv_lg2_vlenb to TCG_TYPE_V64. */ 3054 QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3); 3055 3056 for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) { 3057 for (MemOp e = MO_8; e <= MO_64; e++) { 3058 probe_frac_lmul_1(t, e); 3059 } 3060 } 3061} 3062 3063static void tcg_target_init(TCGContext *s) 3064{ 3065 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; 3066 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; 3067 3068 tcg_target_call_clobber_regs = -1; 3069 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 3070 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 3071 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 3072 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 3073 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 3074 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 3075 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 3076 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 3077 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 3078 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 3079 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10); 3080 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11); 3081 3082 s->reserved_regs = 0; 3083 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 3084 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 3085 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 3086 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 3087 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 3088 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); 3089 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 3090 3091 if (cpuinfo & CPUINFO_ZVE64X) { 3092 switch (riscv_lg2_vlenb) { 3093 case TCG_TYPE_V64: 3094 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 3095 tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS; 3096 tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS; 3097 s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS); 3098 break; 3099 case TCG_TYPE_V128: 3100 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 3101 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 3102 tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS; 3103 s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS); 3104 break; 3105 default: 3106 /* Guaranteed by Zve64x. 
static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

    tcg_target_call_clobber_regs = -1;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);

    if (cpuinfo & CPUINFO_ZVE64X) {
        switch (riscv_lg2_vlenb) {
        case TCG_TYPE_V64:
            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
            tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS;
            tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS;
            s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
            break;
        case TCG_TYPE_V128:
            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
            tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS;
            s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
            break;
        default:
            /* Guaranteed by Zve64x. */
            tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256);
            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
            break;
        }
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0);
        probe_frac_lmul();
    }
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}