1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Andrzej Zaborowski 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25#include "elf.h" 26 27int arm_arch = __ARM_ARCH; 28 29#ifndef use_idiv_instructions 30bool use_idiv_instructions; 31#endif 32#ifndef use_neon_instructions 33bool use_neon_instructions; 34#endif 35 36/* Used for function call generation. */ 37#define TCG_TARGET_STACK_ALIGN 8 38#define TCG_TARGET_CALL_STACK_OFFSET 0 39#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL 40#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN 41#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN 42#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF 43 44#ifdef CONFIG_DEBUG_TCG 45static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 46 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 47 "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc", 48 "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7", 49 "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15", 50}; 51#endif 52 53static const int tcg_target_reg_alloc_order[] = { 54 TCG_REG_R4, 55 TCG_REG_R5, 56 TCG_REG_R6, 57 TCG_REG_R7, 58 TCG_REG_R8, 59 TCG_REG_R9, 60 TCG_REG_R10, 61 TCG_REG_R11, 62 TCG_REG_R13, 63 TCG_REG_R0, 64 TCG_REG_R1, 65 TCG_REG_R2, 66 TCG_REG_R3, 67 TCG_REG_R12, 68 TCG_REG_R14, 69 70 TCG_REG_Q0, 71 TCG_REG_Q1, 72 TCG_REG_Q2, 73 TCG_REG_Q3, 74 /* Q4 - Q7 are call-saved, and skipped. */ 75 TCG_REG_Q8, 76 TCG_REG_Q9, 77 TCG_REG_Q10, 78 TCG_REG_Q11, 79 TCG_REG_Q12, 80 TCG_REG_Q13, 81 TCG_REG_Q14, 82 TCG_REG_Q15, 83}; 84 85static const int tcg_target_call_iarg_regs[4] = { 86 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 87}; 88 89static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 90{ 91 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 92 tcg_debug_assert(slot >= 0 && slot <= 3); 93 return TCG_REG_R0 + slot; 94} 95 96#define TCG_REG_TMP TCG_REG_R12 97#define TCG_VEC_TMP TCG_REG_Q15 98#define TCG_REG_GUEST_BASE TCG_REG_R11 99 100typedef enum { 101 COND_EQ = 0x0, 102 COND_NE = 0x1, 103 COND_CS = 0x2, /* Unsigned greater or equal */ 104 COND_CC = 0x3, /* Unsigned less than */ 105 COND_MI = 0x4, /* Negative */ 106 COND_PL = 0x5, /* Zero or greater */ 107 COND_VS = 0x6, /* Overflow */ 108 COND_VC = 0x7, /* No overflow */ 109 COND_HI = 0x8, /* Unsigned greater than */ 110 COND_LS = 0x9, /* Unsigned less or equal */ 111 COND_GE = 0xa, 112 COND_LT = 0xb, 113 COND_GT = 0xc, 114 COND_LE = 0xd, 115 COND_AL = 0xe, 116} ARMCond; 117 118#define TO_CPSR (1 << 20) 119 120#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) 121#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) 122#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) 123#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) 124#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) 125#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) 126#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) 127#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) 128 129typedef enum { 130 ARITH_AND = 0x0 << 21, 131 ARITH_EOR = 0x1 << 21, 132 ARITH_SUB = 0x2 << 21, 133 ARITH_RSB = 0x3 << 21, 134 ARITH_ADD = 0x4 << 21, 135 ARITH_ADC = 0x5 << 21, 136 ARITH_SBC = 0x6 << 21, 137 ARITH_RSC = 0x7 << 21, 138 ARITH_TST = 0x8 << 21 | TO_CPSR, 139 ARITH_CMP = 0xa << 21 | TO_CPSR, 140 ARITH_CMN = 0xb << 21 | TO_CPSR, 141 ARITH_ORR = 0xc << 21, 142 ARITH_MOV = 0xd << 21, 143 ARITH_BIC = 0xe << 21, 144 ARITH_MVN = 0xf << 21, 145 146 INSN_B = 0x0a000000, 147 148 INSN_CLZ = 0x016f0f10, 149 INSN_RBIT = 0x06ff0f30, 150 151 INSN_LDMIA = 0x08b00000, 152 INSN_STMDB = 0x09200000, 153 154 INSN_LDR_IMM = 0x04100000, 155 INSN_LDR_REG = 0x06100000, 156 INSN_STR_IMM = 0x04000000, 157 INSN_STR_REG = 0x06000000, 158 159 INSN_LDRH_IMM = 0x005000b0, 160 INSN_LDRH_REG = 0x001000b0, 161 INSN_LDRSH_IMM = 0x005000f0, 162 INSN_LDRSH_REG = 0x001000f0, 163 INSN_STRH_IMM = 0x004000b0, 164 INSN_STRH_REG = 0x000000b0, 165 166 INSN_LDRB_IMM = 0x04500000, 167 INSN_LDRB_REG = 0x06500000, 168 INSN_LDRSB_IMM = 0x005000d0, 169 INSN_LDRSB_REG = 0x001000d0, 170 INSN_STRB_IMM = 0x04400000, 171 INSN_STRB_REG = 0x06400000, 172 173 INSN_LDRD_IMM = 0x004000d0, 174 INSN_LDRD_REG = 0x000000d0, 175 INSN_STRD_IMM = 0x004000f0, 176 INSN_STRD_REG = 0x000000f0, 177 178 INSN_DMB_ISH = 0xf57ff05b, 179 INSN_DMB_MCR = 0xee070fba, 180 181 /* Architected nop introduced in v6k. */ 182 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this 183 also Just So Happened to do nothing on pre-v6k so that we 184 don't need to conditionalize it? */ 185 INSN_NOP_v6k = 0xe320f000, 186 /* Otherwise the assembler uses mov r0,r0 */ 187 INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, 188 189 INSN_VADD = 0xf2000800, 190 INSN_VAND = 0xf2000110, 191 INSN_VBIC = 0xf2100110, 192 INSN_VEOR = 0xf3000110, 193 INSN_VORN = 0xf2300110, 194 INSN_VORR = 0xf2200110, 195 INSN_VSUB = 0xf3000800, 196 INSN_VMUL = 0xf2000910, 197 INSN_VQADD = 0xf2000010, 198 INSN_VQADD_U = 0xf3000010, 199 INSN_VQSUB = 0xf2000210, 200 INSN_VQSUB_U = 0xf3000210, 201 INSN_VMAX = 0xf2000600, 202 INSN_VMAX_U = 0xf3000600, 203 INSN_VMIN = 0xf2000610, 204 INSN_VMIN_U = 0xf3000610, 205 206 INSN_VABS = 0xf3b10300, 207 INSN_VMVN = 0xf3b00580, 208 INSN_VNEG = 0xf3b10380, 209 210 INSN_VCEQ0 = 0xf3b10100, 211 INSN_VCGT0 = 0xf3b10000, 212 INSN_VCGE0 = 0xf3b10080, 213 INSN_VCLE0 = 0xf3b10180, 214 INSN_VCLT0 = 0xf3b10200, 215 216 INSN_VCEQ = 0xf3000810, 217 INSN_VCGE = 0xf2000310, 218 INSN_VCGT = 0xf2000300, 219 INSN_VCGE_U = 0xf3000310, 220 INSN_VCGT_U = 0xf3000300, 221 222 INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ 223 INSN_VSARI = 0xf2800010, /* VSHR.S */ 224 INSN_VSHRI = 0xf3800010, /* VSHR.U */ 225 INSN_VSLI = 0xf3800510, 226 INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ 227 INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ 228 229 INSN_VBSL = 0xf3100110, 230 INSN_VBIT = 0xf3200110, 231 INSN_VBIF = 0xf3300110, 232 233 INSN_VTST = 0xf2000810, 234 235 INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ 236 INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ 237 INSN_VLDR_D = 0xed100b00, /* VLDR.64 */ 238 INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ 239 INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */ 240 INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ 241 INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */ 242} ARMInsn; 243 244#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) 245 246static const uint8_t tcg_cond_to_arm_cond[] = { 247 [TCG_COND_EQ] = COND_EQ, 248 [TCG_COND_NE] = COND_NE, 249 [TCG_COND_LT] = COND_LT, 250 [TCG_COND_GE] = COND_GE, 251 [TCG_COND_LE] = COND_LE, 252 [TCG_COND_GT] = COND_GT, 253 /* unsigned */ 254 [TCG_COND_LTU] = COND_CC, 255 [TCG_COND_GEU] = COND_CS, 256 [TCG_COND_LEU] = COND_LS, 257 [TCG_COND_GTU] = COND_HI, 258}; 259 260static int encode_imm(uint32_t imm); 261 262/* TCG private relocation type: add with pc+imm8 */ 263#define R_ARM_PC8 11 264 265/* TCG private relocation type: vldr with imm8 << 2 */ 266#define R_ARM_PC11 12 267 268static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 269{ 270 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 271 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2; 272 273 if (offset == sextract32(offset, 0, 24)) { 274 *src_rw = deposit32(*src_rw, 0, 24, offset); 275 return true; 276 } 277 return false; 278} 279 280static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 281{ 282 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 283 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 284 285 if (offset >= -0xfff && offset <= 0xfff) { 286 tcg_insn_unit insn = *src_rw; 287 bool u = (offset >= 0); 288 if (!u) { 289 offset = -offset; 290 } 291 insn = deposit32(insn, 23, 1, u); 292 insn = deposit32(insn, 0, 12, offset); 293 *src_rw = insn; 294 return true; 295 } 296 return false; 297} 298 299static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 300{ 301 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 302 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4; 303 304 if (offset >= -0xff && offset <= 0xff) { 305 tcg_insn_unit insn = *src_rw; 306 bool u = (offset >= 0); 307 if (!u) { 308 offset = -offset; 309 } 310 insn = deposit32(insn, 23, 1, u); 311 insn = deposit32(insn, 0, 8, offset); 312 *src_rw = insn; 313 return true; 314 } 315 return false; 316} 317 318static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 319{ 320 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 321 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 322 int imm12 = encode_imm(offset); 323 324 if (imm12 >= 0) { 325 *src_rw = deposit32(*src_rw, 0, 12, imm12); 326 return true; 327 } 328 return false; 329} 330 331static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 332 intptr_t value, intptr_t addend) 333{ 334 tcg_debug_assert(addend == 0); 335 switch (type) { 336 case R_ARM_PC24: 337 return reloc_pc24(code_ptr, (const tcg_insn_unit *)value); 338 case R_ARM_PC13: 339 return reloc_pc13(code_ptr, (const tcg_insn_unit *)value); 340 case R_ARM_PC11: 341 return reloc_pc11(code_ptr, (const tcg_insn_unit *)value); 342 case R_ARM_PC8: 343 return reloc_pc8(code_ptr, (const tcg_insn_unit *)value); 344 default: 345 g_assert_not_reached(); 346 } 347} 348 349#define TCG_CT_CONST_ARM 0x100 350#define TCG_CT_CONST_INV 0x200 351#define TCG_CT_CONST_NEG 0x400 352#define TCG_CT_CONST_ZERO 0x800 353#define TCG_CT_CONST_ORRI 0x1000 354#define TCG_CT_CONST_ANDI 0x2000 355 356#define ALL_GENERAL_REGS 0xffffu 357#define ALL_VECTOR_REGS 0xffff0000u 358 359/* 360 * r0-r3 will be overwritten when reading the tlb entry (system-mode only); 361 * r14 will be overwritten by the BLNE branching to the slow path. 362 */ 363#define ALL_QLDST_REGS \ 364 (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14))) 365 366/* 367 * ARM immediates for ALU instructions are made of an unsigned 8-bit 368 * right-rotated by an even amount between 0 and 30. 369 * 370 * Return < 0 if @imm cannot be encoded, else the entire imm12 field. 371 */ 372static int encode_imm(uint32_t imm) 373{ 374 uint32_t rot, imm8; 375 376 /* Simple case, no rotation required. */ 377 if ((imm & ~0xff) == 0) { 378 return imm; 379 } 380 381 /* Next, try a simple even shift. */ 382 rot = ctz32(imm) & ~1; 383 imm8 = imm >> rot; 384 rot = 32 - rot; 385 if ((imm8 & ~0xff) == 0) { 386 goto found; 387 } 388 389 /* 390 * Finally, try harder with rotations. 391 * The ctz test above will have taken care of rotates >= 8. 392 */ 393 for (rot = 2; rot < 8; rot += 2) { 394 imm8 = rol32(imm, rot); 395 if ((imm8 & ~0xff) == 0) { 396 goto found; 397 } 398 } 399 /* Fail: imm cannot be encoded. */ 400 return -1; 401 402 found: 403 /* Note that rot is even, and we discard bit 0 by shifting by 7. */ 404 return rot << 7 | imm8; 405} 406 407static int encode_imm_nofail(uint32_t imm) 408{ 409 int ret = encode_imm(imm); 410 tcg_debug_assert(ret >= 0); 411 return ret; 412} 413 414static bool check_fit_imm(uint32_t imm) 415{ 416 return encode_imm(imm) >= 0; 417} 418 419/* Return true if v16 is a valid 16-bit shifted immediate. */ 420static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) 421{ 422 if (v16 == (v16 & 0xff)) { 423 *cmode = 0x8; 424 *imm8 = v16 & 0xff; 425 return true; 426 } else if (v16 == (v16 & 0xff00)) { 427 *cmode = 0xa; 428 *imm8 = v16 >> 8; 429 return true; 430 } 431 return false; 432} 433 434/* Return true if v32 is a valid 32-bit shifted immediate. */ 435static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) 436{ 437 if (v32 == (v32 & 0xff)) { 438 *cmode = 0x0; 439 *imm8 = v32 & 0xff; 440 return true; 441 } else if (v32 == (v32 & 0xff00)) { 442 *cmode = 0x2; 443 *imm8 = (v32 >> 8) & 0xff; 444 return true; 445 } else if (v32 == (v32 & 0xff0000)) { 446 *cmode = 0x4; 447 *imm8 = (v32 >> 16) & 0xff; 448 return true; 449 } else if (v32 == (v32 & 0xff000000)) { 450 *cmode = 0x6; 451 *imm8 = v32 >> 24; 452 return true; 453 } 454 return false; 455} 456 457/* Return true if v32 is a valid 32-bit shifting ones immediate. */ 458static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) 459{ 460 if ((v32 & 0xffff00ff) == 0xff) { 461 *cmode = 0xc; 462 *imm8 = (v32 >> 8) & 0xff; 463 return true; 464 } else if ((v32 & 0xff00ffff) == 0xffff) { 465 *cmode = 0xd; 466 *imm8 = (v32 >> 16) & 0xff; 467 return true; 468 } 469 return false; 470} 471 472/* 473 * Return non-zero if v32 can be formed by MOVI+ORR. 474 * Place the parameters for MOVI in (cmode, imm8). 475 * Return the cmode for ORR; the imm8 can be had via extraction from v32. 476 */ 477static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) 478{ 479 int i; 480 481 for (i = 6; i > 0; i -= 2) { 482 /* Mask out one byte we can add with ORR. */ 483 uint32_t tmp = v32 & ~(0xffu << (i * 4)); 484 if (is_shimm32(tmp, cmode, imm8) || 485 is_soimm32(tmp, cmode, imm8)) { 486 break; 487 } 488 } 489 return i; 490} 491 492/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ 493static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) 494{ 495 if (v32 == deposit32(v32, 16, 16, v32)) { 496 return is_shimm16(v32, cmode, imm8); 497 } else { 498 return is_shimm32(v32, cmode, imm8); 499 } 500} 501 502/* Test if a constant matches the constraint. 503 * TODO: define constraints for: 504 * 505 * ldr/str offset: between -0xfff and 0xfff 506 * ldrh/strh offset: between -0xff and 0xff 507 * mov operand2: values represented with x << (2 * y), x < 0x100 508 * add, sub, eor...: ditto 509 */ 510static bool tcg_target_const_match(int64_t val, int ct, 511 TCGType type, TCGCond cond, int vece) 512{ 513 if (ct & TCG_CT_CONST) { 514 return 1; 515 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { 516 return 1; 517 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) { 518 return 1; 519 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) { 520 return 1; 521 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 522 return 1; 523 } 524 525 switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { 526 case 0: 527 break; 528 case TCG_CT_CONST_ANDI: 529 val = ~val; 530 /* fallthru */ 531 case TCG_CT_CONST_ORRI: 532 if (val == deposit64(val, 32, 32, val)) { 533 int cmode, imm8; 534 return is_shimm1632(val, &cmode, &imm8); 535 } 536 break; 537 default: 538 /* Both bits should not be set for the same insn. */ 539 g_assert_not_reached(); 540 } 541 542 return 0; 543} 544 545static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset) 546{ 547 tcg_out32(s, (cond << 28) | INSN_B | 548 (((offset - 8) >> 2) & 0x00ffffff)); 549} 550 551static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset) 552{ 553 tcg_out32(s, (cond << 28) | 0x0b000000 | 554 (((offset - 8) >> 2) & 0x00ffffff)); 555} 556 557static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 558{ 559 tcg_out32(s, (cond << 28) | 0x012fff30 | rn); 560} 561 562static void tcg_out_blx_imm(TCGContext *s, int32_t offset) 563{ 564 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | 565 (((offset - 8) >> 2) & 0x00ffffff)); 566} 567 568static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc, 569 TCGReg rd, TCGReg rn, TCGReg rm, int shift) 570{ 571 tcg_out32(s, (cond << 28) | (0 << 25) | opc | 572 (rn << 16) | (rd << 12) | shift | rm); 573} 574 575static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm) 576{ 577 /* Simple reg-reg move, optimising out the 'do nothing' case */ 578 if (rd != rm) { 579 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0)); 580 } 581} 582 583static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 584{ 585 tcg_out32(s, (cond << 28) | 0x012fff10 | rn); 586} 587 588static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) 589{ 590 /* 591 * Unless the C portion of QEMU is compiled as thumb, we don't need 592 * true BX semantics; merely a branch to an address held in a register. 593 */ 594 tcg_out_bx_reg(s, cond, rn); 595} 596 597static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, 598 TCGReg rd, TCGReg rn, int im) 599{ 600 tcg_out32(s, (cond << 28) | (1 << 25) | opc | 601 (rn << 16) | (rd << 12) | im); 602} 603 604static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, 605 TCGReg rn, uint16_t mask) 606{ 607 tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); 608} 609 610/* Note that this routine is used for both LDR and LDRH formats, so we do 611 not wish to include an immediate shift at this point. */ 612static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 613 TCGReg rn, TCGReg rm, bool u, bool p, bool w) 614{ 615 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) 616 | (w << 21) | (rn << 16) | (rt << 12) | rm); 617} 618 619static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 620 TCGReg rn, int imm8, bool p, bool w) 621{ 622 bool u = 1; 623 if (imm8 < 0) { 624 imm8 = -imm8; 625 u = 0; 626 } 627 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 628 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); 629} 630 631static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, 632 TCGReg rt, TCGReg rn, int imm12, bool p, bool w) 633{ 634 bool u = 1; 635 if (imm12 < 0) { 636 imm12 = -imm12; 637 u = 0; 638 } 639 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 640 (rn << 16) | (rt << 12) | imm12); 641} 642 643static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, 644 TCGReg rn, int imm12) 645{ 646 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); 647} 648 649static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, 650 TCGReg rn, int imm12) 651{ 652 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); 653} 654 655static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, 656 TCGReg rn, TCGReg rm) 657{ 658 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); 659} 660 661static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, 662 TCGReg rn, TCGReg rm) 663{ 664 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); 665} 666 667static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, 668 TCGReg rn, int imm8) 669{ 670 tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); 671} 672 673static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, 674 TCGReg rn, TCGReg rm) 675{ 676 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); 677} 678 679static void __attribute__((unused)) 680tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) 681{ 682 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); 683} 684 685static void __attribute__((unused)) 686tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8) 687{ 688 tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); 689} 690 691static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, 692 TCGReg rn, TCGReg rm) 693{ 694 tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); 695} 696 697/* Register pre-increment with base writeback. */ 698static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 699 TCGReg rn, TCGReg rm) 700{ 701 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); 702} 703 704static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 705 TCGReg rn, TCGReg rm) 706{ 707 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); 708} 709 710static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt, 711 TCGReg rn, int imm8) 712{ 713 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); 714} 715 716static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt, 717 TCGReg rn, int imm8) 718{ 719 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); 720} 721 722static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt, 723 TCGReg rn, TCGReg rm) 724{ 725 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); 726} 727 728static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt, 729 TCGReg rn, TCGReg rm) 730{ 731 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); 732} 733 734static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt, 735 TCGReg rn, int imm8) 736{ 737 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); 738} 739 740static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt, 741 TCGReg rn, TCGReg rm) 742{ 743 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); 744} 745 746static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt, 747 TCGReg rn, int imm12) 748{ 749 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); 750} 751 752static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt, 753 TCGReg rn, int imm12) 754{ 755 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); 756} 757 758static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt, 759 TCGReg rn, TCGReg rm) 760{ 761 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); 762} 763 764static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt, 765 TCGReg rn, TCGReg rm) 766{ 767 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); 768} 769 770static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt, 771 TCGReg rn, int imm8) 772{ 773 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); 774} 775 776static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt, 777 TCGReg rn, TCGReg rm) 778{ 779 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); 780} 781 782static void tcg_out_movi_pool(TCGContext *s, ARMCond cond, 783 TCGReg rd, uint32_t arg) 784{ 785 new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); 786 tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); 787} 788 789static void tcg_out_movi32(TCGContext *s, ARMCond cond, 790 TCGReg rd, uint32_t arg) 791{ 792 int imm12, diff, opc, sh1, sh2; 793 uint32_t tt0, tt1, tt2; 794 795 /* Check a single MOV/MVN before anything else. */ 796 imm12 = encode_imm(arg); 797 if (imm12 >= 0) { 798 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12); 799 return; 800 } 801 imm12 = encode_imm(~arg); 802 if (imm12 >= 0) { 803 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12); 804 return; 805 } 806 807 /* Check for a pc-relative address. This will usually be the TB, 808 or within the TB, which is immediately before the code block. */ 809 diff = tcg_pcrel_diff(s, (void *)arg) - 8; 810 if (diff >= 0) { 811 imm12 = encode_imm(diff); 812 if (imm12 >= 0) { 813 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12); 814 return; 815 } 816 } else { 817 imm12 = encode_imm(-diff); 818 if (imm12 >= 0) { 819 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12); 820 return; 821 } 822 } 823 824 /* Use movw + movt. */ 825 if (use_armv7_instructions) { 826 /* movw */ 827 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12) 828 | ((arg << 4) & 0x000f0000) | (arg & 0xfff)); 829 if (arg & 0xffff0000) { 830 /* movt */ 831 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12) 832 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff)); 833 } 834 return; 835 } 836 837 /* Look for sequences of two insns. If we have lots of 1's, we can 838 shorten the sequence by beginning with mvn and then clearing 839 higher bits with eor. */ 840 tt0 = arg; 841 opc = ARITH_MOV; 842 if (ctpop32(arg) > 16) { 843 tt0 = ~arg; 844 opc = ARITH_MVN; 845 } 846 sh1 = ctz32(tt0) & ~1; 847 tt1 = tt0 & ~(0xff << sh1); 848 sh2 = ctz32(tt1) & ~1; 849 tt2 = tt1 & ~(0xff << sh2); 850 if (tt2 == 0) { 851 int rot; 852 853 rot = ((32 - sh1) << 7) & 0xf00; 854 tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); 855 rot = ((32 - sh2) << 7) & 0xf00; 856 tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, 857 ((tt0 >> sh2) & 0xff) | rot); 858 return; 859 } 860 861 /* Otherwise, drop it into the constant pool. */ 862 tcg_out_movi_pool(s, cond, rd, arg); 863} 864 865/* 866 * Emit either the reg,imm or reg,reg form of a data-processing insn. 867 * rhs must satisfy the "rI" constraint. 868 */ 869static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, 870 TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const) 871{ 872 if (rhs_is_const) { 873 tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs)); 874 } else { 875 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 876 } 877} 878 879/* 880 * Emit either the reg,imm or reg,reg form of a data-processing insn. 881 * rhs must satisfy the "rIK" constraint. 882 */ 883static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, 884 ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, 885 bool rhs_is_const) 886{ 887 if (rhs_is_const) { 888 int imm12 = encode_imm(rhs); 889 if (imm12 < 0) { 890 imm12 = encode_imm_nofail(~rhs); 891 opc = opinv; 892 } 893 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 894 } else { 895 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 896 } 897} 898 899static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, 900 ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, 901 bool rhs_is_const) 902{ 903 /* Emit either the reg,imm or reg,reg form of a data-processing insn. 904 * rhs must satisfy the "rIN" constraint. 905 */ 906 if (rhs_is_const) { 907 int imm12 = encode_imm(rhs); 908 if (imm12 < 0) { 909 imm12 = encode_imm_nofail(-rhs); 910 opc = opneg; 911 } 912 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 913 } else { 914 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 915 } 916} 917 918static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, 919 TCGReg rn, TCGReg rm) 920{ 921 /* mul */ 922 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); 923} 924 925static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, 926 TCGReg rd1, TCGReg rn, TCGReg rm) 927{ 928 /* umull */ 929 tcg_out32(s, (cond << 28) | 0x00800090 | 930 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 931} 932 933static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, 934 TCGReg rd1, TCGReg rn, TCGReg rm) 935{ 936 /* smull */ 937 tcg_out32(s, (cond << 28) | 0x00c00090 | 938 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 939} 940 941static void tcg_out_sdiv(TCGContext *s, ARMCond cond, 942 TCGReg rd, TCGReg rn, TCGReg rm) 943{ 944 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 945} 946 947static void tcg_out_udiv(TCGContext *s, ARMCond cond, 948 TCGReg rd, TCGReg rn, TCGReg rm) 949{ 950 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 951} 952 953static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 954{ 955 /* sxtb */ 956 tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn); 957} 958 959static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) 960{ 961 tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff); 962} 963 964static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 965{ 966 /* sxth */ 967 tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn); 968} 969 970static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) 971{ 972 /* uxth */ 973 tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn); 974} 975 976static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) 977{ 978 g_assert_not_reached(); 979} 980 981static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) 982{ 983 g_assert_not_reached(); 984} 985 986static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 987{ 988 g_assert_not_reached(); 989} 990 991static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 992{ 993 g_assert_not_reached(); 994} 995 996static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) 997{ 998 g_assert_not_reached(); 999} 1000 1001static void tcg_out_bswap16(TCGContext *s, ARMCond cond, 1002 TCGReg rd, TCGReg rn, int flags) 1003{ 1004 if (flags & TCG_BSWAP_OS) { 1005 /* revsh */ 1006 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); 1007 return; 1008 } 1009 1010 /* rev16 */ 1011 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); 1012 if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1013 /* uxth */ 1014 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); 1015 } 1016} 1017 1018static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 1019{ 1020 /* rev */ 1021 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); 1022} 1023 1024static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, 1025 TCGArg a1, int ofs, int len, bool const_a1) 1026{ 1027 if (const_a1) { 1028 /* bfi becomes bfc with rn == 15. */ 1029 a1 = 15; 1030 } 1031 /* bfi/bfc */ 1032 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1 1033 | (ofs << 7) | ((ofs + len - 1) << 16)); 1034} 1035 1036static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd, 1037 TCGReg rn, int ofs, int len) 1038{ 1039 /* According to gcc, AND can be faster. */ 1040 if (ofs == 0 && len <= 8) { 1041 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 1042 encode_imm_nofail((1 << len) - 1)); 1043 return; 1044 } 1045 1046 if (use_armv7_instructions) { 1047 /* ubfx */ 1048 tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn 1049 | (ofs << 7) | ((len - 1) << 16)); 1050 return; 1051 } 1052 1053 assert(ofs % 8 == 0); 1054 switch (len) { 1055 case 8: 1056 /* uxtb */ 1057 tcg_out32(s, 0x06ef0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn); 1058 break; 1059 case 16: 1060 /* uxth */ 1061 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn); 1062 break; 1063 default: 1064 g_assert_not_reached(); 1065 } 1066} 1067 1068static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd, 1069 TCGReg rn, int ofs, int len) 1070{ 1071 if (use_armv7_instructions) { 1072 /* sbfx */ 1073 tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn 1074 | (ofs << 7) | ((len - 1) << 16)); 1075 return; 1076 } 1077 1078 assert(ofs % 8 == 0); 1079 switch (len) { 1080 case 8: 1081 /* sxtb */ 1082 tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn); 1083 break; 1084 case 16: 1085 /* sxth */ 1086 tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn); 1087 break; 1088 default: 1089 g_assert_not_reached(); 1090 } 1091} 1092 1093 1094static void tcg_out_ld32u(TCGContext *s, ARMCond cond, 1095 TCGReg rd, TCGReg rn, int32_t offset) 1096{ 1097 if (offset > 0xfff || offset < -0xfff) { 1098 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1099 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP); 1100 } else 1101 tcg_out_ld32_12(s, cond, rd, rn, offset); 1102} 1103 1104static void tcg_out_st32(TCGContext *s, ARMCond cond, 1105 TCGReg rd, TCGReg rn, int32_t offset) 1106{ 1107 if (offset > 0xfff || offset < -0xfff) { 1108 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1109 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP); 1110 } else 1111 tcg_out_st32_12(s, cond, rd, rn, offset); 1112} 1113 1114static void tcg_out_ld16u(TCGContext *s, ARMCond cond, 1115 TCGReg rd, TCGReg rn, int32_t offset) 1116{ 1117 if (offset > 0xff || offset < -0xff) { 1118 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1119 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); 1120 } else 1121 tcg_out_ld16u_8(s, cond, rd, rn, offset); 1122} 1123 1124static void tcg_out_ld16s(TCGContext *s, ARMCond cond, 1125 TCGReg rd, TCGReg rn, int32_t offset) 1126{ 1127 if (offset > 0xff || offset < -0xff) { 1128 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1129 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); 1130 } else 1131 tcg_out_ld16s_8(s, cond, rd, rn, offset); 1132} 1133 1134static void tcg_out_st16(TCGContext *s, ARMCond cond, 1135 TCGReg rd, TCGReg rn, int32_t offset) 1136{ 1137 if (offset > 0xff || offset < -0xff) { 1138 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1139 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); 1140 } else 1141 tcg_out_st16_8(s, cond, rd, rn, offset); 1142} 1143 1144static void tcg_out_ld8u(TCGContext *s, ARMCond cond, 1145 TCGReg rd, TCGReg rn, int32_t offset) 1146{ 1147 if (offset > 0xfff || offset < -0xfff) { 1148 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1149 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); 1150 } else 1151 tcg_out_ld8_12(s, cond, rd, rn, offset); 1152} 1153 1154static void tcg_out_ld8s(TCGContext *s, ARMCond cond, 1155 TCGReg rd, TCGReg rn, int32_t offset) 1156{ 1157 if (offset > 0xff || offset < -0xff) { 1158 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1159 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); 1160 } else 1161 tcg_out_ld8s_8(s, cond, rd, rn, offset); 1162} 1163 1164static void tcg_out_st8(TCGContext *s, ARMCond cond, 1165 TCGReg rd, TCGReg rn, int32_t offset) 1166{ 1167 if (offset > 0xfff || offset < -0xfff) { 1168 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1169 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); 1170 } else 1171 tcg_out_st8_12(s, cond, rd, rn, offset); 1172} 1173 1174/* 1175 * The _goto case is normally between TBs within the same code buffer, and 1176 * with the code buffer limited to 16MB we wouldn't need the long case. 1177 * But we also use it for the tail-call to the qemu_ld/st helpers, which does. 1178 */ 1179static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr) 1180{ 1181 intptr_t addri = (intptr_t)addr; 1182 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1183 bool arm_mode = !(addri & 1); 1184 1185 if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { 1186 tcg_out_b_imm(s, cond, disp); 1187 return; 1188 } 1189 1190 /* LDR is interworking from v5t. */ 1191 tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); 1192} 1193 1194/* 1195 * The call case is mostly used for helpers - so it's not unreasonable 1196 * for them to be beyond branch range. 1197 */ 1198static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr) 1199{ 1200 intptr_t addri = (intptr_t)addr; 1201 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1202 bool arm_mode = !(addri & 1); 1203 1204 if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { 1205 if (arm_mode) { 1206 tcg_out_bl_imm(s, COND_AL, disp); 1207 } else { 1208 tcg_out_blx_imm(s, disp); 1209 } 1210 return; 1211 } 1212 1213 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); 1214 tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); 1215} 1216 1217static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr, 1218 const TCGHelperInfo *info) 1219{ 1220 tcg_out_call_int(s, addr); 1221} 1222 1223static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) 1224{ 1225 if (l->has_value) { 1226 tcg_out_goto(s, cond, l->u.value_ptr); 1227 } else { 1228 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); 1229 tcg_out_b_imm(s, cond, 0); 1230 } 1231} 1232 1233static void tcg_out_mb(TCGContext *s, TCGArg a0) 1234{ 1235 if (use_armv7_instructions) { 1236 tcg_out32(s, INSN_DMB_ISH); 1237 } else { 1238 tcg_out32(s, INSN_DMB_MCR); 1239 } 1240} 1241 1242static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a, 1243 TCGArg b, int b_const) 1244{ 1245 if (!is_tst_cond(cond)) { 1246 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const); 1247 return cond; 1248 } 1249 1250 cond = tcg_tst_eqne_cond(cond); 1251 if (b_const) { 1252 int imm12 = encode_imm(b); 1253 1254 /* 1255 * The compare constraints allow rIN, but TST does not support N. 1256 * Be prepared to load the constant into a scratch register. 1257 */ 1258 if (imm12 >= 0) { 1259 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12); 1260 return cond; 1261 } 1262 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b); 1263 b = TCG_REG_TMP; 1264 } 1265 tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0)); 1266 return cond; 1267} 1268 1269static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, 1270 const int *const_args) 1271{ 1272 TCGReg al = args[0]; 1273 TCGReg ah = args[1]; 1274 TCGArg bl = args[2]; 1275 TCGArg bh = args[3]; 1276 TCGCond cond = args[4]; 1277 int const_bl = const_args[2]; 1278 int const_bh = const_args[3]; 1279 1280 switch (cond) { 1281 case TCG_COND_EQ: 1282 case TCG_COND_NE: 1283 case TCG_COND_LTU: 1284 case TCG_COND_LEU: 1285 case TCG_COND_GTU: 1286 case TCG_COND_GEU: 1287 /* 1288 * We perform a conditional comparison. If the high half is 1289 * equal, then overwrite the flags with the comparison of the 1290 * low half. The resulting flags cover the whole. 1291 */ 1292 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh); 1293 tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); 1294 return cond; 1295 1296 case TCG_COND_TSTEQ: 1297 case TCG_COND_TSTNE: 1298 /* Similar, but with TST instead of CMP. */ 1299 tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh); 1300 tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl); 1301 return tcg_tst_eqne_cond(cond); 1302 1303 case TCG_COND_LT: 1304 case TCG_COND_GE: 1305 /* We perform a double-word subtraction and examine the result. 1306 We do not actually need the result of the subtract, so the 1307 low part "subtract" is a compare. For the high half we have 1308 no choice but to compute into a temporary. */ 1309 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl); 1310 tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR, 1311 TCG_REG_TMP, ah, bh, const_bh); 1312 return cond; 1313 1314 case TCG_COND_LE: 1315 case TCG_COND_GT: 1316 /* Similar, but with swapped arguments, via reversed subtract. */ 1317 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, 1318 TCG_REG_TMP, al, bl, const_bl); 1319 tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, 1320 TCG_REG_TMP, ah, bh, const_bh); 1321 return tcg_swap_cond(cond); 1322 1323 default: 1324 g_assert_not_reached(); 1325 } 1326} 1327 1328/* 1329 * Note that TCGReg references Q-registers. 1330 * Q-regno = 2 * D-regno, so shift left by 1 while inserting. 1331 */ 1332static uint32_t encode_vd(TCGReg rd) 1333{ 1334 tcg_debug_assert(rd >= TCG_REG_Q0); 1335 return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); 1336} 1337 1338static uint32_t encode_vn(TCGReg rn) 1339{ 1340 tcg_debug_assert(rn >= TCG_REG_Q0); 1341 return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17); 1342} 1343 1344static uint32_t encode_vm(TCGReg rm) 1345{ 1346 tcg_debug_assert(rm >= TCG_REG_Q0); 1347 return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); 1348} 1349 1350static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece, 1351 TCGReg d, TCGReg m) 1352{ 1353 tcg_out32(s, insn | (vece << 18) | (q << 6) | 1354 encode_vd(d) | encode_vm(m)); 1355} 1356 1357static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, 1358 TCGReg d, TCGReg n, TCGReg m) 1359{ 1360 tcg_out32(s, insn | (vece << 20) | (q << 6) | 1361 encode_vd(d) | encode_vn(n) | encode_vm(m)); 1362} 1363 1364static void tcg_out_vmovi(TCGContext *s, TCGReg rd, 1365 int q, int op, int cmode, uint8_t imm8) 1366{ 1367 tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5) 1368 | (cmode << 8) | extract32(imm8, 0, 4) 1369 | (extract32(imm8, 4, 3) << 16) 1370 | (extract32(imm8, 7, 1) << 24)); 1371} 1372 1373static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q, 1374 TCGReg rd, TCGReg rm, int l_imm6) 1375{ 1376 tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) | 1377 (extract32(l_imm6, 6, 1) << 7) | 1378 (extract32(l_imm6, 0, 6) << 16)); 1379} 1380 1381static void tcg_out_vldst(TCGContext *s, ARMInsn insn, 1382 TCGReg rd, TCGReg rn, int offset) 1383{ 1384 if (offset != 0) { 1385 if (check_fit_imm(offset) || check_fit_imm(-offset)) { 1386 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1387 TCG_REG_TMP, rn, offset, true); 1388 } else { 1389 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); 1390 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1391 TCG_REG_TMP, TCG_REG_TMP, rn, 0); 1392 } 1393 rn = TCG_REG_TMP; 1394 } 1395 tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); 1396} 1397 1398typedef struct { 1399 ARMCond cond; 1400 TCGReg base; 1401 int index; 1402 bool index_scratch; 1403 TCGAtomAlign aa; 1404} HostAddress; 1405 1406bool tcg_target_has_memory_bswap(MemOp memop) 1407{ 1408 return false; 1409} 1410 1411static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) 1412{ 1413 /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */ 1414 return TCG_REG_R14; 1415} 1416 1417static const TCGLdstHelperParam ldst_helper_param = { 1418 .ra_gen = ldst_ra_gen, 1419 .ntmp = 1, 1420 .tmp = { TCG_REG_TMP }, 1421}; 1422 1423static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1424{ 1425 MemOp opc = get_memop(lb->oi); 1426 1427 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1428 return false; 1429 } 1430 1431 tcg_out_ld_helper_args(s, lb, &ldst_helper_param); 1432 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); 1433 tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); 1434 1435 tcg_out_goto(s, COND_AL, lb->raddr); 1436 return true; 1437} 1438 1439static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1440{ 1441 MemOp opc = get_memop(lb->oi); 1442 1443 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1444 return false; 1445 } 1446 1447 tcg_out_st_helper_args(s, lb, &ldst_helper_param); 1448 1449 /* Tail-call to the helper, which will return to the fast path. */ 1450 tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); 1451 return true; 1452} 1453 1454/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ 1455#define MIN_TLB_MASK_TABLE_OFS -256 1456 1457static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 1458 TCGReg addrlo, TCGReg addrhi, 1459 MemOpIdx oi, bool is_ld) 1460{ 1461 TCGLabelQemuLdst *ldst = NULL; 1462 MemOp opc = get_memop(oi); 1463 unsigned a_mask; 1464 1465 if (tcg_use_softmmu) { 1466 *h = (HostAddress){ 1467 .cond = COND_AL, 1468 .base = addrlo, 1469 .index = TCG_REG_R1, 1470 .index_scratch = true, 1471 }; 1472 } else { 1473 *h = (HostAddress){ 1474 .cond = COND_AL, 1475 .base = addrlo, 1476 .index = guest_base ? TCG_REG_GUEST_BASE : -1, 1477 .index_scratch = false, 1478 }; 1479 } 1480 1481 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 1482 a_mask = (1 << h->aa.align) - 1; 1483 1484 if (tcg_use_softmmu) { 1485 int mem_index = get_mmuidx(oi); 1486 int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) 1487 : offsetof(CPUTLBEntry, addr_write); 1488 int fast_off = tlb_mask_table_ofs(s, mem_index); 1489 unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; 1490 TCGReg t_addr; 1491 1492 ldst = new_ldst_label(s); 1493 ldst->is_ld = is_ld; 1494 ldst->oi = oi; 1495 ldst->addrlo_reg = addrlo; 1496 ldst->addrhi_reg = addrhi; 1497 1498 /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ 1499 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1500 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); 1501 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); 1502 1503 /* Extract the tlb index from the address into R0. */ 1504 tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, 1505 SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); 1506 1507 /* 1508 * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1509 * Load the tlb comparator into R2/R3 and the fast path addend into R1. 1510 */ 1511 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 1512 if (cmp_off == 0) { 1513 if (s->addr_type == TCG_TYPE_I32) { 1514 tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, 1515 TCG_REG_R1, TCG_REG_R0); 1516 } else { 1517 tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, 1518 TCG_REG_R1, TCG_REG_R0); 1519 } 1520 } else { 1521 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1522 TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); 1523 if (s->addr_type == TCG_TYPE_I32) { 1524 tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1525 } else { 1526 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1527 } 1528 } 1529 1530 /* Load the tlb addend. */ 1531 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, 1532 offsetof(CPUTLBEntry, addend)); 1533 1534 /* 1535 * Check alignment, check comparators. 1536 * Do this in 2-4 insns. Use MOVW for v7, if possible, 1537 * to reduce the number of sequential conditional instructions. 1538 * Almost all guests have at least 4k pages, which means that we need 1539 * to clear at least 9 bits even for an 8-byte memory, which means it 1540 * isn't worth checking for an immediate operand for BIC. 1541 * 1542 * For unaligned accesses, test the page of the last unit of alignment. 1543 * This leaves the least significant alignment bits unchanged, and of 1544 * course must be zero. 1545 */ 1546 t_addr = addrlo; 1547 if (a_mask < s_mask) { 1548 t_addr = TCG_REG_R0; 1549 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, 1550 addrlo, s_mask - a_mask); 1551 } 1552 if (use_armv7_instructions && s->page_bits <= 16) { 1553 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); 1554 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1555 t_addr, TCG_REG_TMP, 0); 1556 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, 1557 TCG_REG_R2, TCG_REG_TMP, 0); 1558 } else { 1559 if (a_mask) { 1560 tcg_debug_assert(a_mask <= 0xff); 1561 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1562 } 1563 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, 1564 SHIFT_IMM_LSR(s->page_bits)); 1565 tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 1566 0, TCG_REG_R2, TCG_REG_TMP, 1567 SHIFT_IMM_LSL(s->page_bits)); 1568 } 1569 1570 if (s->addr_type != TCG_TYPE_I32) { 1571 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); 1572 } 1573 } else if (a_mask) { 1574 ldst = new_ldst_label(s); 1575 ldst->is_ld = is_ld; 1576 ldst->oi = oi; 1577 ldst->addrlo_reg = addrlo; 1578 ldst->addrhi_reg = addrhi; 1579 1580 /* We are expecting alignment to max out at 7 */ 1581 tcg_debug_assert(a_mask <= 0xff); 1582 /* tst addr, #mask */ 1583 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1584 } 1585 1586 return ldst; 1587} 1588 1589static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1590 TCGReg datahi, HostAddress h) 1591{ 1592 TCGReg base; 1593 1594 /* Byte swapping is left to middle-end expansion. */ 1595 tcg_debug_assert((opc & MO_BSWAP) == 0); 1596 1597 switch (opc & MO_SSIZE) { 1598 case MO_UB: 1599 if (h.index < 0) { 1600 tcg_out_ld8_12(s, h.cond, datalo, h.base, 0); 1601 } else { 1602 tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index); 1603 } 1604 break; 1605 case MO_SB: 1606 if (h.index < 0) { 1607 tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0); 1608 } else { 1609 tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index); 1610 } 1611 break; 1612 case MO_UW: 1613 if (h.index < 0) { 1614 tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0); 1615 } else { 1616 tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index); 1617 } 1618 break; 1619 case MO_SW: 1620 if (h.index < 0) { 1621 tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0); 1622 } else { 1623 tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index); 1624 } 1625 break; 1626 case MO_UL: 1627 if (h.index < 0) { 1628 tcg_out_ld32_12(s, h.cond, datalo, h.base, 0); 1629 } else { 1630 tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index); 1631 } 1632 break; 1633 case MO_UQ: 1634 /* We used pair allocation for datalo, so already should be aligned. */ 1635 tcg_debug_assert((datalo & 1) == 0); 1636 tcg_debug_assert(datahi == datalo + 1); 1637 /* LDRD requires alignment; double-check that. */ 1638 if (memop_alignment_bits(opc) >= MO_64) { 1639 if (h.index < 0) { 1640 tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); 1641 break; 1642 } 1643 /* 1644 * Rm (the second address op) must not overlap Rt or Rt + 1. 1645 * Since datalo is aligned, we can simplify the test via alignment. 1646 * Flip the two address arguments if that works. 1647 */ 1648 if ((h.index & ~1) != datalo) { 1649 tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index); 1650 break; 1651 } 1652 if ((h.base & ~1) != datalo) { 1653 tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base); 1654 break; 1655 } 1656 } 1657 if (h.index < 0) { 1658 base = h.base; 1659 if (datalo == h.base) { 1660 tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base); 1661 base = TCG_REG_TMP; 1662 } 1663 } else if (h.index_scratch) { 1664 tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base); 1665 tcg_out_ld32_12(s, h.cond, datahi, h.index, 4); 1666 break; 1667 } else { 1668 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1669 h.base, h.index, SHIFT_IMM_LSL(0)); 1670 base = TCG_REG_TMP; 1671 } 1672 tcg_out_ld32_12(s, h.cond, datalo, base, 0); 1673 tcg_out_ld32_12(s, h.cond, datahi, base, 4); 1674 break; 1675 default: 1676 g_assert_not_reached(); 1677 } 1678} 1679 1680static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, 1681 TCGReg addrlo, TCGReg addrhi, 1682 MemOpIdx oi, TCGType data_type) 1683{ 1684 MemOp opc = get_memop(oi); 1685 TCGLabelQemuLdst *ldst; 1686 HostAddress h; 1687 1688 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); 1689 if (ldst) { 1690 ldst->type = data_type; 1691 ldst->datalo_reg = datalo; 1692 ldst->datahi_reg = datahi; 1693 1694 /* 1695 * This a conditional BL only to load a pointer within this 1696 * opcode into LR for the slow path. We will not be using 1697 * the value for a tail call. 1698 */ 1699 ldst->label_ptr[0] = s->code_ptr; 1700 tcg_out_bl_imm(s, COND_NE, 0); 1701 1702 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1703 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1704 } else { 1705 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1706 } 1707} 1708 1709static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1710 TCGReg datahi, HostAddress h) 1711{ 1712 /* Byte swapping is left to middle-end expansion. */ 1713 tcg_debug_assert((opc & MO_BSWAP) == 0); 1714 1715 switch (opc & MO_SIZE) { 1716 case MO_8: 1717 if (h.index < 0) { 1718 tcg_out_st8_12(s, h.cond, datalo, h.base, 0); 1719 } else { 1720 tcg_out_st8_r(s, h.cond, datalo, h.base, h.index); 1721 } 1722 break; 1723 case MO_16: 1724 if (h.index < 0) { 1725 tcg_out_st16_8(s, h.cond, datalo, h.base, 0); 1726 } else { 1727 tcg_out_st16_r(s, h.cond, datalo, h.base, h.index); 1728 } 1729 break; 1730 case MO_32: 1731 if (h.index < 0) { 1732 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1733 } else { 1734 tcg_out_st32_r(s, h.cond, datalo, h.base, h.index); 1735 } 1736 break; 1737 case MO_64: 1738 /* We used pair allocation for datalo, so already should be aligned. */ 1739 tcg_debug_assert((datalo & 1) == 0); 1740 tcg_debug_assert(datahi == datalo + 1); 1741 /* STRD requires alignment; double-check that. */ 1742 if (memop_alignment_bits(opc) >= MO_64) { 1743 if (h.index < 0) { 1744 tcg_out_strd_8(s, h.cond, datalo, h.base, 0); 1745 } else { 1746 tcg_out_strd_r(s, h.cond, datalo, h.base, h.index); 1747 } 1748 } else if (h.index < 0) { 1749 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1750 tcg_out_st32_12(s, h.cond, datahi, h.base, 4); 1751 } else if (h.index_scratch) { 1752 tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base); 1753 tcg_out_st32_12(s, h.cond, datahi, h.index, 4); 1754 } else { 1755 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1756 h.base, h.index, SHIFT_IMM_LSL(0)); 1757 tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0); 1758 tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4); 1759 } 1760 break; 1761 default: 1762 g_assert_not_reached(); 1763 } 1764} 1765 1766static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, 1767 TCGReg addrlo, TCGReg addrhi, 1768 MemOpIdx oi, TCGType data_type) 1769{ 1770 MemOp opc = get_memop(oi); 1771 TCGLabelQemuLdst *ldst; 1772 HostAddress h; 1773 1774 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); 1775 if (ldst) { 1776 ldst->type = data_type; 1777 ldst->datalo_reg = datalo; 1778 ldst->datahi_reg = datahi; 1779 1780 h.cond = COND_EQ; 1781 tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); 1782 1783 /* The conditional call is last, as we're going to return here. */ 1784 ldst->label_ptr[0] = s->code_ptr; 1785 tcg_out_bl_imm(s, COND_NE, 0); 1786 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1787 } else { 1788 tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); 1789 } 1790} 1791 1792static void tcg_out_epilogue(TCGContext *s); 1793 1794static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) 1795{ 1796 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg); 1797 tcg_out_epilogue(s); 1798} 1799 1800static void tcg_out_goto_tb(TCGContext *s, int which) 1801{ 1802 uintptr_t i_addr; 1803 intptr_t i_disp; 1804 1805 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1806 set_jmp_insn_offset(s, which); 1807 tcg_out32(s, INSN_NOP); 1808 1809 /* When branch is out of range, fall through to indirect. */ 1810 i_addr = get_jmp_target_addr(s, which); 1811 i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8; 1812 tcg_debug_assert(i_disp < 0); 1813 if (i_disp >= -0xfff) { 1814 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp); 1815 } else { 1816 /* 1817 * The TB is close, but outside the 12 bits addressable by 1818 * the load. We can extend this to 20 bits with a sub of a 1819 * shifted immediate from pc. 1820 */ 1821 int h = -i_disp; 1822 int l = -(h & 0xfff); 1823 1824 h = encode_imm_nofail(h + l); 1825 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h); 1826 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l); 1827 } 1828 set_jmp_reset_offset(s, which); 1829} 1830 1831void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1832 uintptr_t jmp_rx, uintptr_t jmp_rw) 1833{ 1834 uintptr_t addr = tb->jmp_target_addr[n]; 1835 ptrdiff_t offset = addr - (jmp_rx + 8); 1836 tcg_insn_unit insn; 1837 1838 /* Either directly branch, or fall through to indirect branch. */ 1839 if (offset == sextract64(offset, 0, 26)) { 1840 /* B <addr> */ 1841 insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2); 1842 } else { 1843 insn = INSN_NOP; 1844 } 1845 1846 qatomic_set((uint32_t *)jmp_rw, insn); 1847 flush_idcache_range(jmp_rx, jmp_rw, 4); 1848} 1849 1850static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type, 1851 const TCGArg args[TCG_MAX_OP_ARGS], 1852 const int const_args[TCG_MAX_OP_ARGS]) 1853{ 1854 TCGArg a0, a1, a2, a3, a4, a5; 1855 int c; 1856 1857 switch (opc) { 1858 case INDEX_op_goto_ptr: 1859 tcg_out_b_reg(s, COND_AL, args[0]); 1860 break; 1861 case INDEX_op_br: 1862 tcg_out_goto_label(s, COND_AL, arg_label(args[0])); 1863 break; 1864 1865 case INDEX_op_ld8u_i32: 1866 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); 1867 break; 1868 case INDEX_op_ld8s_i32: 1869 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); 1870 break; 1871 case INDEX_op_ld16u_i32: 1872 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); 1873 break; 1874 case INDEX_op_ld16s_i32: 1875 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); 1876 break; 1877 case INDEX_op_ld_i32: 1878 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); 1879 break; 1880 case INDEX_op_st8_i32: 1881 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); 1882 break; 1883 case INDEX_op_st16_i32: 1884 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); 1885 break; 1886 case INDEX_op_st_i32: 1887 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); 1888 break; 1889 1890 case INDEX_op_movcond_i32: 1891 /* Constraints mean that v2 is always in the same register as dest, 1892 * so we only need to do "if condition passed, move v1 to dest". 1893 */ 1894 c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]); 1895 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV, 1896 ARITH_MVN, args[0], 0, args[3], const_args[3]); 1897 break; 1898 case INDEX_op_add_i32: 1899 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1900 args[0], args[1], args[2], const_args[2]); 1901 break; 1902 case INDEX_op_sub_i32: 1903 if (const_args[1]) { 1904 if (const_args[2]) { 1905 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]); 1906 } else { 1907 tcg_out_dat_rI(s, COND_AL, ARITH_RSB, 1908 args[0], args[2], args[1], 1); 1909 } 1910 } else { 1911 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD, 1912 args[0], args[1], args[2], const_args[2]); 1913 } 1914 break; 1915 case INDEX_op_and_i32: 1916 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC, 1917 args[0], args[1], args[2], const_args[2]); 1918 break; 1919 case INDEX_op_andc_i32: 1920 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND, 1921 args[0], args[1], args[2], const_args[2]); 1922 break; 1923 case INDEX_op_or_i32: 1924 c = ARITH_ORR; 1925 goto gen_arith; 1926 case INDEX_op_xor_i32: 1927 c = ARITH_EOR; 1928 /* Fall through. */ 1929 gen_arith: 1930 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]); 1931 break; 1932 case INDEX_op_add2_i32: 1933 a0 = args[0], a1 = args[1], a2 = args[2]; 1934 a3 = args[3], a4 = args[4], a5 = args[5]; 1935 if (a0 == a3 || (a0 == a5 && !const_args[5])) { 1936 a0 = TCG_REG_TMP; 1937 } 1938 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, 1939 a0, a2, a4, const_args[4]); 1940 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, 1941 a1, a3, a5, const_args[5]); 1942 tcg_out_mov_reg(s, COND_AL, args[0], a0); 1943 break; 1944 case INDEX_op_sub2_i32: 1945 a0 = args[0], a1 = args[1], a2 = args[2]; 1946 a3 = args[3], a4 = args[4], a5 = args[5]; 1947 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { 1948 a0 = TCG_REG_TMP; 1949 } 1950 if (const_args[2]) { 1951 if (const_args[4]) { 1952 tcg_out_movi32(s, COND_AL, a0, a4); 1953 a4 = a0; 1954 } 1955 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); 1956 } else { 1957 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, 1958 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); 1959 } 1960 if (const_args[3]) { 1961 if (const_args[5]) { 1962 tcg_out_movi32(s, COND_AL, a1, a5); 1963 a5 = a1; 1964 } 1965 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); 1966 } else { 1967 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, 1968 a1, a3, a5, const_args[5]); 1969 } 1970 tcg_out_mov_reg(s, COND_AL, args[0], a0); 1971 break; 1972 case INDEX_op_neg_i32: 1973 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); 1974 break; 1975 case INDEX_op_not_i32: 1976 tcg_out_dat_reg(s, COND_AL, 1977 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); 1978 break; 1979 case INDEX_op_mul_i32: 1980 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); 1981 break; 1982 case INDEX_op_mulu2_i32: 1983 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); 1984 break; 1985 case INDEX_op_muls2_i32: 1986 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]); 1987 break; 1988 /* XXX: Perhaps args[2] & 0x1f is wrong */ 1989 case INDEX_op_shl_i32: 1990 c = const_args[2] ? 1991 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); 1992 goto gen_shift32; 1993 case INDEX_op_shr_i32: 1994 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : 1995 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); 1996 goto gen_shift32; 1997 case INDEX_op_sar_i32: 1998 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : 1999 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); 2000 goto gen_shift32; 2001 case INDEX_op_rotr_i32: 2002 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) : 2003 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]); 2004 /* Fall through. */ 2005 gen_shift32: 2006 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); 2007 break; 2008 2009 case INDEX_op_rotl_i32: 2010 if (const_args[2]) { 2011 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 2012 ((0x20 - args[2]) & 0x1f) ? 2013 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) : 2014 SHIFT_IMM_LSL(0)); 2015 } else { 2016 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20); 2017 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 2018 SHIFT_REG_ROR(TCG_REG_TMP)); 2019 } 2020 break; 2021 2022 case INDEX_op_ctz_i32: 2023 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); 2024 a1 = TCG_REG_TMP; 2025 goto do_clz; 2026 2027 case INDEX_op_clz_i32: 2028 a1 = args[1]; 2029 do_clz: 2030 a0 = args[0]; 2031 a2 = args[2]; 2032 c = const_args[2]; 2033 if (c && a2 == 32) { 2034 tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); 2035 break; 2036 } 2037 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); 2038 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); 2039 if (c || a0 != a2) { 2040 tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); 2041 } 2042 break; 2043 2044 case INDEX_op_brcond_i32: 2045 c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]); 2046 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3])); 2047 break; 2048 case INDEX_op_setcond_i32: 2049 c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); 2050 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], 2051 ARITH_MOV, args[0], 0, 1); 2052 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2053 ARITH_MOV, args[0], 0, 0); 2054 break; 2055 case INDEX_op_negsetcond_i32: 2056 c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); 2057 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], 2058 ARITH_MVN, args[0], 0, 0); 2059 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2060 ARITH_MOV, args[0], 0, 0); 2061 break; 2062 2063 case INDEX_op_brcond2_i32: 2064 c = tcg_out_cmp2(s, args, const_args); 2065 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); 2066 break; 2067 case INDEX_op_setcond2_i32: 2068 c = tcg_out_cmp2(s, args + 1, const_args + 1); 2069 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); 2070 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2071 ARITH_MOV, args[0], 0, 0); 2072 break; 2073 2074 case INDEX_op_qemu_ld_a32_i32: 2075 tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); 2076 break; 2077 case INDEX_op_qemu_ld_a64_i32: 2078 tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], 2079 args[3], TCG_TYPE_I32); 2080 break; 2081 case INDEX_op_qemu_ld_a32_i64: 2082 tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, 2083 args[3], TCG_TYPE_I64); 2084 break; 2085 case INDEX_op_qemu_ld_a64_i64: 2086 tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], 2087 args[4], TCG_TYPE_I64); 2088 break; 2089 2090 case INDEX_op_qemu_st_a32_i32: 2091 tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); 2092 break; 2093 case INDEX_op_qemu_st_a64_i32: 2094 tcg_out_qemu_st(s, args[0], -1, args[1], args[2], 2095 args[3], TCG_TYPE_I32); 2096 break; 2097 case INDEX_op_qemu_st_a32_i64: 2098 tcg_out_qemu_st(s, args[0], args[1], args[2], -1, 2099 args[3], TCG_TYPE_I64); 2100 break; 2101 case INDEX_op_qemu_st_a64_i64: 2102 tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], 2103 args[4], TCG_TYPE_I64); 2104 break; 2105 2106 case INDEX_op_bswap16_i32: 2107 tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]); 2108 break; 2109 case INDEX_op_bswap32_i32: 2110 tcg_out_bswap32(s, COND_AL, args[0], args[1]); 2111 break; 2112 2113 case INDEX_op_deposit_i32: 2114 tcg_out_deposit(s, COND_AL, args[0], args[2], 2115 args[3], args[4], const_args[2]); 2116 break; 2117 case INDEX_op_extract_i32: 2118 tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); 2119 break; 2120 case INDEX_op_sextract_i32: 2121 tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); 2122 break; 2123 case INDEX_op_extract2_i32: 2124 /* ??? These optimization vs zero should be generic. */ 2125 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ 2126 if (const_args[1]) { 2127 if (const_args[2]) { 2128 tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); 2129 } else { 2130 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2131 args[2], SHIFT_IMM_LSL(32 - args[3])); 2132 } 2133 } else if (const_args[2]) { 2134 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2135 args[1], SHIFT_IMM_LSR(args[3])); 2136 } else { 2137 /* We can do extract2 in 2 insns, vs the 3 required otherwise. */ 2138 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, 2139 args[2], SHIFT_IMM_LSL(32 - args[3])); 2140 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, 2141 args[1], SHIFT_IMM_LSR(args[3])); 2142 } 2143 break; 2144 2145 case INDEX_op_div_i32: 2146 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); 2147 break; 2148 case INDEX_op_divu_i32: 2149 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); 2150 break; 2151 2152 case INDEX_op_mb: 2153 tcg_out_mb(s, args[0]); 2154 break; 2155 2156 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 2157 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 2158 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 2159 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 2160 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 2161 case INDEX_op_ext8u_i32: 2162 case INDEX_op_ext16s_i32: 2163 case INDEX_op_ext16u_i32: 2164 default: 2165 g_assert_not_reached(); 2166 } 2167} 2168 2169static TCGConstraintSetIndex 2170tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) 2171{ 2172 switch (op) { 2173 case INDEX_op_goto_ptr: 2174 return C_O0_I1(r); 2175 2176 case INDEX_op_ld8u_i32: 2177 case INDEX_op_ld8s_i32: 2178 case INDEX_op_ld16u_i32: 2179 case INDEX_op_ld16s_i32: 2180 case INDEX_op_ld_i32: 2181 case INDEX_op_neg_i32: 2182 case INDEX_op_not_i32: 2183 case INDEX_op_bswap16_i32: 2184 case INDEX_op_bswap32_i32: 2185 case INDEX_op_ext8s_i32: 2186 case INDEX_op_ext16s_i32: 2187 case INDEX_op_ext16u_i32: 2188 case INDEX_op_extract_i32: 2189 case INDEX_op_sextract_i32: 2190 return C_O1_I1(r, r); 2191 2192 case INDEX_op_st8_i32: 2193 case INDEX_op_st16_i32: 2194 case INDEX_op_st_i32: 2195 return C_O0_I2(r, r); 2196 2197 case INDEX_op_add_i32: 2198 case INDEX_op_sub_i32: 2199 case INDEX_op_setcond_i32: 2200 case INDEX_op_negsetcond_i32: 2201 return C_O1_I2(r, r, rIN); 2202 2203 case INDEX_op_and_i32: 2204 case INDEX_op_andc_i32: 2205 case INDEX_op_clz_i32: 2206 case INDEX_op_ctz_i32: 2207 return C_O1_I2(r, r, rIK); 2208 2209 case INDEX_op_mul_i32: 2210 case INDEX_op_div_i32: 2211 case INDEX_op_divu_i32: 2212 return C_O1_I2(r, r, r); 2213 2214 case INDEX_op_mulu2_i32: 2215 case INDEX_op_muls2_i32: 2216 return C_O2_I2(r, r, r, r); 2217 2218 case INDEX_op_or_i32: 2219 case INDEX_op_xor_i32: 2220 return C_O1_I2(r, r, rI); 2221 2222 case INDEX_op_shl_i32: 2223 case INDEX_op_shr_i32: 2224 case INDEX_op_sar_i32: 2225 case INDEX_op_rotl_i32: 2226 case INDEX_op_rotr_i32: 2227 return C_O1_I2(r, r, ri); 2228 2229 case INDEX_op_brcond_i32: 2230 return C_O0_I2(r, rIN); 2231 case INDEX_op_deposit_i32: 2232 return C_O1_I2(r, 0, rZ); 2233 case INDEX_op_extract2_i32: 2234 return C_O1_I2(r, rZ, rZ); 2235 case INDEX_op_movcond_i32: 2236 return C_O1_I4(r, r, rIN, rIK, 0); 2237 case INDEX_op_add2_i32: 2238 return C_O2_I4(r, r, r, r, rIN, rIK); 2239 case INDEX_op_sub2_i32: 2240 return C_O2_I4(r, r, rI, rI, rIN, rIK); 2241 case INDEX_op_brcond2_i32: 2242 return C_O0_I4(r, r, rI, rI); 2243 case INDEX_op_setcond2_i32: 2244 return C_O1_I4(r, r, r, rI, rI); 2245 2246 case INDEX_op_qemu_ld_a32_i32: 2247 return C_O1_I1(r, q); 2248 case INDEX_op_qemu_ld_a64_i32: 2249 return C_O1_I2(r, q, q); 2250 case INDEX_op_qemu_ld_a32_i64: 2251 return C_O2_I1(e, p, q); 2252 case INDEX_op_qemu_ld_a64_i64: 2253 return C_O2_I2(e, p, q, q); 2254 case INDEX_op_qemu_st_a32_i32: 2255 return C_O0_I2(q, q); 2256 case INDEX_op_qemu_st_a64_i32: 2257 return C_O0_I3(q, q, q); 2258 case INDEX_op_qemu_st_a32_i64: 2259 return C_O0_I3(Q, p, q); 2260 case INDEX_op_qemu_st_a64_i64: 2261 return C_O0_I4(Q, p, q, q); 2262 2263 case INDEX_op_st_vec: 2264 return C_O0_I2(w, r); 2265 case INDEX_op_ld_vec: 2266 case INDEX_op_dupm_vec: 2267 return C_O1_I1(w, r); 2268 case INDEX_op_dup_vec: 2269 return C_O1_I1(w, wr); 2270 case INDEX_op_abs_vec: 2271 case INDEX_op_neg_vec: 2272 case INDEX_op_not_vec: 2273 case INDEX_op_shli_vec: 2274 case INDEX_op_shri_vec: 2275 case INDEX_op_sari_vec: 2276 return C_O1_I1(w, w); 2277 case INDEX_op_dup2_vec: 2278 case INDEX_op_add_vec: 2279 case INDEX_op_mul_vec: 2280 case INDEX_op_smax_vec: 2281 case INDEX_op_smin_vec: 2282 case INDEX_op_ssadd_vec: 2283 case INDEX_op_sssub_vec: 2284 case INDEX_op_sub_vec: 2285 case INDEX_op_umax_vec: 2286 case INDEX_op_umin_vec: 2287 case INDEX_op_usadd_vec: 2288 case INDEX_op_ussub_vec: 2289 case INDEX_op_xor_vec: 2290 case INDEX_op_arm_sshl_vec: 2291 case INDEX_op_arm_ushl_vec: 2292 return C_O1_I2(w, w, w); 2293 case INDEX_op_arm_sli_vec: 2294 return C_O1_I2(w, 0, w); 2295 case INDEX_op_or_vec: 2296 case INDEX_op_andc_vec: 2297 return C_O1_I2(w, w, wO); 2298 case INDEX_op_and_vec: 2299 case INDEX_op_orc_vec: 2300 return C_O1_I2(w, w, wV); 2301 case INDEX_op_cmp_vec: 2302 return C_O1_I2(w, w, wZ); 2303 case INDEX_op_bitsel_vec: 2304 return C_O1_I3(w, w, w, w); 2305 default: 2306 return C_NotImplemented; 2307 } 2308} 2309 2310static void tcg_target_init(TCGContext *s) 2311{ 2312 /* 2313 * Only probe for the platform and capabilities if we haven't already 2314 * determined maximum values at compile time. 2315 */ 2316#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2317 { 2318 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2319#ifndef use_idiv_instructions 2320 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2321#endif 2322#ifndef use_neon_instructions 2323 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2324#endif 2325 } 2326#endif 2327 2328 if (__ARM_ARCH < 7) { 2329 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2330 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2331 arm_arch = pl[1] - '0'; 2332 } 2333 2334 if (arm_arch < 6) { 2335 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2336 exit(EXIT_FAILURE); 2337 } 2338 } 2339 2340 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2341 2342 tcg_target_call_clobber_regs = 0; 2343 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2344 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2345 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2346 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2347 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2348 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2349 2350 if (use_neon_instructions) { 2351 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2352 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2353 2354 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2355 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2356 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2357 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2358 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2359 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2360 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2361 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2362 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2363 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2364 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2365 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2366 } 2367 2368 s->reserved_regs = 0; 2369 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2370 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2371 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2372 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2373} 2374 2375static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2376 TCGReg arg1, intptr_t arg2) 2377{ 2378 switch (type) { 2379 case TCG_TYPE_I32: 2380 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2381 return; 2382 case TCG_TYPE_V64: 2383 /* regs 1; size 8; align 8 */ 2384 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2385 return; 2386 case TCG_TYPE_V128: 2387 /* 2388 * We have only 8-byte alignment for the stack per the ABI. 2389 * Rather than dynamically re-align the stack, it's easier 2390 * to simply not request alignment beyond that. So: 2391 * regs 2; size 8; align 8 2392 */ 2393 tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2); 2394 return; 2395 default: 2396 g_assert_not_reached(); 2397 } 2398} 2399 2400static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 2401 TCGReg arg1, intptr_t arg2) 2402{ 2403 switch (type) { 2404 case TCG_TYPE_I32: 2405 tcg_out_st32(s, COND_AL, arg, arg1, arg2); 2406 return; 2407 case TCG_TYPE_V64: 2408 /* regs 1; size 8; align 8 */ 2409 tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); 2410 return; 2411 case TCG_TYPE_V128: 2412 /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */ 2413 tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2); 2414 return; 2415 default: 2416 g_assert_not_reached(); 2417 } 2418} 2419 2420static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 2421 TCGReg base, intptr_t ofs) 2422{ 2423 return false; 2424} 2425 2426static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 2427{ 2428 if (ret == arg) { 2429 return true; 2430 } 2431 switch (type) { 2432 case TCG_TYPE_I32: 2433 if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) { 2434 tcg_out_mov_reg(s, COND_AL, ret, arg); 2435 return true; 2436 } 2437 return false; 2438 2439 case TCG_TYPE_V64: 2440 case TCG_TYPE_V128: 2441 /* "VMOV D,N" is an alias for "VORR D,N,N". */ 2442 tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg); 2443 return true; 2444 2445 default: 2446 g_assert_not_reached(); 2447 } 2448} 2449 2450static void tcg_out_movi(TCGContext *s, TCGType type, 2451 TCGReg ret, tcg_target_long arg) 2452{ 2453 tcg_debug_assert(type == TCG_TYPE_I32); 2454 tcg_debug_assert(ret < TCG_REG_Q0); 2455 tcg_out_movi32(s, COND_AL, ret, arg); 2456} 2457 2458static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 2459{ 2460 return false; 2461} 2462 2463static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 2464 tcg_target_long imm) 2465{ 2466 int enc, opc = ARITH_ADD; 2467 2468 /* All of the easiest immediates to encode are positive. */ 2469 if (imm < 0) { 2470 imm = -imm; 2471 opc = ARITH_SUB; 2472 } 2473 enc = encode_imm(imm); 2474 if (enc >= 0) { 2475 tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc); 2476 } else { 2477 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm); 2478 tcg_out_dat_reg(s, COND_AL, opc, rd, rs, 2479 TCG_REG_TMP, SHIFT_IMM_LSL(0)); 2480 } 2481} 2482 2483/* Type is always V128, with I64 elements. */ 2484static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh) 2485{ 2486 /* Move high element into place first. */ 2487 /* VMOV Dd+1, Ds */ 2488 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh); 2489 /* Move low element into place; tcg_out_mov will check for nop. */ 2490 tcg_out_mov(s, TCG_TYPE_V64, rd, rl); 2491} 2492 2493static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 2494 TCGReg rd, TCGReg rs) 2495{ 2496 int q = type - TCG_TYPE_V64; 2497 2498 if (vece == MO_64) { 2499 if (type == TCG_TYPE_V128) { 2500 tcg_out_dup2_vec(s, rd, rs, rs); 2501 } else { 2502 tcg_out_mov(s, TCG_TYPE_V64, rd, rs); 2503 } 2504 } else if (rs < TCG_REG_Q0) { 2505 int b = (vece == MO_8); 2506 int e = (vece == MO_16); 2507 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) | 2508 encode_vn(rd) | (rs << 12)); 2509 } else { 2510 int imm4 = 1 << vece; 2511 tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) | 2512 encode_vd(rd) | encode_vm(rs)); 2513 } 2514 return true; 2515} 2516 2517static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 2518 TCGReg rd, TCGReg base, intptr_t offset) 2519{ 2520 if (vece == MO_64) { 2521 tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset); 2522 if (type == TCG_TYPE_V128) { 2523 tcg_out_dup2_vec(s, rd, rd, rd); 2524 } 2525 } else { 2526 int q = type - TCG_TYPE_V64; 2527 tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5), 2528 rd, base, offset); 2529 } 2530 return true; 2531} 2532 2533static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 2534 TCGReg rd, int64_t v64) 2535{ 2536 int q = type - TCG_TYPE_V64; 2537 int cmode, imm8, i; 2538 2539 /* Test all bytes equal first. */ 2540 if (vece == MO_8) { 2541 tcg_out_vmovi(s, rd, q, 0, 0xe, v64); 2542 return; 2543 } 2544 2545 /* 2546 * Test all bytes 0x00 or 0xff second. This can match cases that 2547 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. 2548 */ 2549 for (i = imm8 = 0; i < 8; i++) { 2550 uint8_t byte = v64 >> (i * 8); 2551 if (byte == 0xff) { 2552 imm8 |= 1 << i; 2553 } else if (byte != 0) { 2554 goto fail_bytes; 2555 } 2556 } 2557 tcg_out_vmovi(s, rd, q, 1, 0xe, imm8); 2558 return; 2559 fail_bytes: 2560 2561 /* 2562 * Tests for various replications. For each element width, if we 2563 * cannot find an expansion there's no point checking a larger 2564 * width because we already know by replication it cannot match. 2565 */ 2566 if (vece == MO_16) { 2567 uint16_t v16 = v64; 2568 2569 if (is_shimm16(v16, &cmode, &imm8)) { 2570 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2571 return; 2572 } 2573 if (is_shimm16(~v16, &cmode, &imm8)) { 2574 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2575 return; 2576 } 2577 2578 /* 2579 * Otherwise, all remaining constants can be loaded in two insns: 2580 * rd = v16 & 0xff, rd |= v16 & 0xff00. 2581 */ 2582 tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff); 2583 tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */ 2584 return; 2585 } 2586 2587 if (vece == MO_32) { 2588 uint32_t v32 = v64; 2589 2590 if (is_shimm32(v32, &cmode, &imm8) || 2591 is_soimm32(v32, &cmode, &imm8)) { 2592 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2593 return; 2594 } 2595 if (is_shimm32(~v32, &cmode, &imm8) || 2596 is_soimm32(~v32, &cmode, &imm8)) { 2597 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2598 return; 2599 } 2600 2601 /* 2602 * Restrict the set of constants to those we can load with 2603 * two instructions. Others we load from the pool. 2604 */ 2605 i = is_shimm32_pair(v32, &cmode, &imm8); 2606 if (i) { 2607 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2608 tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8)); 2609 return; 2610 } 2611 i = is_shimm32_pair(~v32, &cmode, &imm8); 2612 if (i) { 2613 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2614 tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8)); 2615 return; 2616 } 2617 } 2618 2619 /* 2620 * As a last resort, load from the constant pool. 2621 */ 2622 if (!q || vece == MO_64) { 2623 new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32); 2624 /* VLDR Dd, [pc + offset] */ 2625 tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16)); 2626 if (q) { 2627 tcg_out_dup2_vec(s, rd, rd, rd); 2628 } 2629 } else { 2630 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0); 2631 /* add tmp, pc, offset */ 2632 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0); 2633 tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0); 2634 } 2635} 2636 2637static const ARMInsn vec_cmp_insn[16] = { 2638 [TCG_COND_EQ] = INSN_VCEQ, 2639 [TCG_COND_GT] = INSN_VCGT, 2640 [TCG_COND_GE] = INSN_VCGE, 2641 [TCG_COND_GTU] = INSN_VCGT_U, 2642 [TCG_COND_GEU] = INSN_VCGE_U, 2643}; 2644 2645static const ARMInsn vec_cmp0_insn[16] = { 2646 [TCG_COND_EQ] = INSN_VCEQ0, 2647 [TCG_COND_GT] = INSN_VCGT0, 2648 [TCG_COND_GE] = INSN_VCGE0, 2649 [TCG_COND_LT] = INSN_VCLT0, 2650 [TCG_COND_LE] = INSN_VCLE0, 2651}; 2652 2653static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 2654 unsigned vecl, unsigned vece, 2655 const TCGArg args[TCG_MAX_OP_ARGS], 2656 const int const_args[TCG_MAX_OP_ARGS]) 2657{ 2658 TCGType type = vecl + TCG_TYPE_V64; 2659 unsigned q = vecl; 2660 TCGArg a0, a1, a2, a3; 2661 int cmode, imm8; 2662 2663 a0 = args[0]; 2664 a1 = args[1]; 2665 a2 = args[2]; 2666 2667 switch (opc) { 2668 case INDEX_op_ld_vec: 2669 tcg_out_ld(s, type, a0, a1, a2); 2670 return; 2671 case INDEX_op_st_vec: 2672 tcg_out_st(s, type, a0, a1, a2); 2673 return; 2674 case INDEX_op_dupm_vec: 2675 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2676 return; 2677 case INDEX_op_dup2_vec: 2678 tcg_out_dup2_vec(s, a0, a1, a2); 2679 return; 2680 case INDEX_op_abs_vec: 2681 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1); 2682 return; 2683 case INDEX_op_neg_vec: 2684 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1); 2685 return; 2686 case INDEX_op_not_vec: 2687 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1); 2688 return; 2689 case INDEX_op_add_vec: 2690 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); 2691 return; 2692 case INDEX_op_mul_vec: 2693 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); 2694 return; 2695 case INDEX_op_smax_vec: 2696 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2); 2697 return; 2698 case INDEX_op_smin_vec: 2699 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2); 2700 return; 2701 case INDEX_op_sub_vec: 2702 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); 2703 return; 2704 case INDEX_op_ssadd_vec: 2705 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2); 2706 return; 2707 case INDEX_op_sssub_vec: 2708 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); 2709 return; 2710 case INDEX_op_umax_vec: 2711 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2); 2712 return; 2713 case INDEX_op_umin_vec: 2714 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2); 2715 return; 2716 case INDEX_op_usadd_vec: 2717 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); 2718 return; 2719 case INDEX_op_ussub_vec: 2720 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2); 2721 return; 2722 case INDEX_op_xor_vec: 2723 tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); 2724 return; 2725 case INDEX_op_arm_sshl_vec: 2726 /* 2727 * Note that Vm is the data and Vn is the shift count, 2728 * therefore the arguments appear reversed. 2729 */ 2730 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1); 2731 return; 2732 case INDEX_op_arm_ushl_vec: 2733 /* See above. */ 2734 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1); 2735 return; 2736 case INDEX_op_shli_vec: 2737 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); 2738 return; 2739 case INDEX_op_shri_vec: 2740 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2); 2741 return; 2742 case INDEX_op_sari_vec: 2743 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); 2744 return; 2745 case INDEX_op_arm_sli_vec: 2746 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece)); 2747 return; 2748 2749 case INDEX_op_andc_vec: 2750 if (!const_args[2]) { 2751 tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2); 2752 return; 2753 } 2754 a2 = ~a2; 2755 /* fall through */ 2756 case INDEX_op_and_vec: 2757 if (const_args[2]) { 2758 is_shimm1632(~a2, &cmode, &imm8); 2759 if (a0 == a1) { 2760 tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */ 2761 return; 2762 } 2763 tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */ 2764 a2 = a0; 2765 } 2766 tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); 2767 return; 2768 2769 case INDEX_op_orc_vec: 2770 if (!const_args[2]) { 2771 tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2); 2772 return; 2773 } 2774 a2 = ~a2; 2775 /* fall through */ 2776 case INDEX_op_or_vec: 2777 if (const_args[2]) { 2778 is_shimm1632(a2, &cmode, &imm8); 2779 if (a0 == a1) { 2780 tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */ 2781 return; 2782 } 2783 tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */ 2784 a2 = a0; 2785 } 2786 tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2); 2787 return; 2788 2789 case INDEX_op_cmp_vec: 2790 { 2791 TCGCond cond = args[3]; 2792 ARMInsn insn; 2793 2794 switch (cond) { 2795 case TCG_COND_NE: 2796 if (const_args[2]) { 2797 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1); 2798 } else { 2799 tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2); 2800 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 2801 } 2802 break; 2803 2804 case TCG_COND_TSTNE: 2805 case TCG_COND_TSTEQ: 2806 if (const_args[2]) { 2807 /* (x & 0) == 0 */ 2808 tcg_out_dupi_vec(s, type, MO_8, a0, 2809 -(cond == TCG_COND_TSTEQ)); 2810 break; 2811 } 2812 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a2); 2813 if (cond == TCG_COND_TSTEQ) { 2814 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 2815 } 2816 break; 2817 2818 default: 2819 if (const_args[2]) { 2820 insn = vec_cmp0_insn[cond]; 2821 if (insn) { 2822 tcg_out_vreg2(s, insn, q, vece, a0, a1); 2823 return; 2824 } 2825 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); 2826 a2 = TCG_VEC_TMP; 2827 } 2828 insn = vec_cmp_insn[cond]; 2829 if (insn == 0) { 2830 TCGArg t; 2831 t = a1, a1 = a2, a2 = t; 2832 cond = tcg_swap_cond(cond); 2833 insn = vec_cmp_insn[cond]; 2834 tcg_debug_assert(insn != 0); 2835 } 2836 tcg_out_vreg3(s, insn, q, vece, a0, a1, a2); 2837 break; 2838 } 2839 } 2840 return; 2841 2842 case INDEX_op_bitsel_vec: 2843 a3 = args[3]; 2844 if (a0 == a3) { 2845 tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1); 2846 } else if (a0 == a2) { 2847 tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1); 2848 } else { 2849 tcg_out_mov(s, type, a0, a1); 2850 tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3); 2851 } 2852 return; 2853 2854 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ 2855 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ 2856 default: 2857 g_assert_not_reached(); 2858 } 2859} 2860 2861int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 2862{ 2863 switch (opc) { 2864 case INDEX_op_add_vec: 2865 case INDEX_op_sub_vec: 2866 case INDEX_op_and_vec: 2867 case INDEX_op_andc_vec: 2868 case INDEX_op_or_vec: 2869 case INDEX_op_orc_vec: 2870 case INDEX_op_xor_vec: 2871 case INDEX_op_not_vec: 2872 case INDEX_op_shli_vec: 2873 case INDEX_op_shri_vec: 2874 case INDEX_op_sari_vec: 2875 case INDEX_op_ssadd_vec: 2876 case INDEX_op_sssub_vec: 2877 case INDEX_op_usadd_vec: 2878 case INDEX_op_ussub_vec: 2879 case INDEX_op_bitsel_vec: 2880 return 1; 2881 case INDEX_op_abs_vec: 2882 case INDEX_op_cmp_vec: 2883 case INDEX_op_mul_vec: 2884 case INDEX_op_neg_vec: 2885 case INDEX_op_smax_vec: 2886 case INDEX_op_smin_vec: 2887 case INDEX_op_umax_vec: 2888 case INDEX_op_umin_vec: 2889 return vece < MO_64; 2890 case INDEX_op_shlv_vec: 2891 case INDEX_op_shrv_vec: 2892 case INDEX_op_sarv_vec: 2893 case INDEX_op_rotli_vec: 2894 case INDEX_op_rotlv_vec: 2895 case INDEX_op_rotrv_vec: 2896 return -1; 2897 default: 2898 return 0; 2899 } 2900} 2901 2902void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2903 TCGArg a0, ...) 2904{ 2905 va_list va; 2906 TCGv_vec v0, v1, v2, t1, t2, c1; 2907 TCGArg a2; 2908 2909 va_start(va, a0); 2910 v0 = temp_tcgv_vec(arg_temp(a0)); 2911 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 2912 a2 = va_arg(va, TCGArg); 2913 va_end(va); 2914 2915 switch (opc) { 2916 case INDEX_op_shlv_vec: 2917 /* 2918 * Merely propagate shlv_vec to arm_ushl_vec. 2919 * In this way we don't set TCG_TARGET_HAS_shv_vec 2920 * because everything is done via expansion. 2921 */ 2922 v2 = temp_tcgv_vec(arg_temp(a2)); 2923 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 2924 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 2925 break; 2926 2927 case INDEX_op_shrv_vec: 2928 case INDEX_op_sarv_vec: 2929 /* Right shifts are negative left shifts for NEON. */ 2930 v2 = temp_tcgv_vec(arg_temp(a2)); 2931 t1 = tcg_temp_new_vec(type); 2932 tcg_gen_neg_vec(vece, t1, v2); 2933 if (opc == INDEX_op_shrv_vec) { 2934 opc = INDEX_op_arm_ushl_vec; 2935 } else { 2936 opc = INDEX_op_arm_sshl_vec; 2937 } 2938 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0), 2939 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2940 tcg_temp_free_vec(t1); 2941 break; 2942 2943 case INDEX_op_rotli_vec: 2944 t1 = tcg_temp_new_vec(type); 2945 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); 2946 vec_gen_4(INDEX_op_arm_sli_vec, type, vece, 2947 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); 2948 tcg_temp_free_vec(t1); 2949 break; 2950 2951 case INDEX_op_rotlv_vec: 2952 v2 = temp_tcgv_vec(arg_temp(a2)); 2953 t1 = tcg_temp_new_vec(type); 2954 c1 = tcg_constant_vec(type, vece, 8 << vece); 2955 tcg_gen_sub_vec(vece, t1, v2, c1); 2956 /* Right shifts are negative left shifts for NEON. */ 2957 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 2958 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2959 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 2960 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 2961 tcg_gen_or_vec(vece, v0, v0, t1); 2962 tcg_temp_free_vec(t1); 2963 break; 2964 2965 case INDEX_op_rotrv_vec: 2966 v2 = temp_tcgv_vec(arg_temp(a2)); 2967 t1 = tcg_temp_new_vec(type); 2968 t2 = tcg_temp_new_vec(type); 2969 c1 = tcg_constant_vec(type, vece, 8 << vece); 2970 tcg_gen_neg_vec(vece, t1, v2); 2971 tcg_gen_sub_vec(vece, t2, c1, v2); 2972 /* Right shifts are negative left shifts for NEON. */ 2973 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 2974 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2975 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2), 2976 tcgv_vec_arg(v1), tcgv_vec_arg(t2)); 2977 tcg_gen_or_vec(vece, v0, t1, t2); 2978 tcg_temp_free_vec(t1); 2979 tcg_temp_free_vec(t2); 2980 break; 2981 2982 default: 2983 g_assert_not_reached(); 2984 } 2985} 2986 2987static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 2988{ 2989 int i; 2990 for (i = 0; i < count; ++i) { 2991 p[i] = INSN_NOP; 2992 } 2993} 2994 2995/* Compute frame size via macros, to share between tcg_target_qemu_prologue 2996 and tcg_register_jit. */ 2997 2998#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long)) 2999 3000#define FRAME_SIZE \ 3001 ((PUSH_SIZE \ 3002 + TCG_STATIC_CALL_ARGS_SIZE \ 3003 + CPU_TEMP_BUF_NLONGS * sizeof(long) \ 3004 + TCG_TARGET_STACK_ALIGN - 1) \ 3005 & -TCG_TARGET_STACK_ALIGN) 3006 3007#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) 3008 3009static void tcg_target_qemu_prologue(TCGContext *s) 3010{ 3011 /* Calling convention requires us to save r4-r11 and lr. */ 3012 /* stmdb sp!, { r4 - r11, lr } */ 3013 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK, 3014 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3015 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3016 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14)); 3017 3018 /* Reserve callee argument and tcg temp space. */ 3019 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, 3020 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3021 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 3022 CPU_TEMP_BUF_NLONGS * sizeof(long)); 3023 3024 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 3025 3026 if (!tcg_use_softmmu && guest_base) { 3027 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); 3028 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); 3029 } 3030 3031 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); 3032 3033 /* 3034 * Return path for goto_ptr. Set return value to 0, a-la exit_tb, 3035 * and fall through to the rest of the epilogue. 3036 */ 3037 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 3038 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); 3039 tcg_out_epilogue(s); 3040} 3041 3042static void tcg_out_epilogue(TCGContext *s) 3043{ 3044 /* Release local stack frame. */ 3045 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, 3046 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3047 3048 /* ldmia sp!, { r4 - r11, pc } */ 3049 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK, 3050 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3051 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3052 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC)); 3053} 3054 3055static void tcg_out_tb_start(TCGContext *s) 3056{ 3057 /* nothing to do */ 3058} 3059 3060typedef struct { 3061 DebugFrameHeader h; 3062 uint8_t fde_def_cfa[4]; 3063 uint8_t fde_reg_ofs[18]; 3064} DebugFrame; 3065 3066#define ELF_HOST_MACHINE EM_ARM 3067 3068/* We're expecting a 2 byte uleb128 encoded value. */ 3069QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); 3070 3071static const DebugFrame debug_frame = { 3072 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 3073 .h.cie.id = -1, 3074 .h.cie.version = 1, 3075 .h.cie.code_align = 1, 3076 .h.cie.data_align = 0x7c, /* sleb128 -4 */ 3077 .h.cie.return_column = 14, 3078 3079 /* Total FDE size does not include the "len" member. */ 3080 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 3081 3082 .fde_def_cfa = { 3083 12, 13, /* DW_CFA_def_cfa sp, ... */ 3084 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 3085 (FRAME_SIZE >> 7) 3086 }, 3087 .fde_reg_ofs = { 3088 /* The following must match the stmdb in the prologue. */ 3089 0x8e, 1, /* DW_CFA_offset, lr, -4 */ 3090 0x8b, 2, /* DW_CFA_offset, r11, -8 */ 3091 0x8a, 3, /* DW_CFA_offset, r10, -12 */ 3092 0x89, 4, /* DW_CFA_offset, r9, -16 */ 3093 0x88, 5, /* DW_CFA_offset, r8, -20 */ 3094 0x87, 6, /* DW_CFA_offset, r7, -24 */ 3095 0x86, 7, /* DW_CFA_offset, r6, -28 */ 3096 0x85, 8, /* DW_CFA_offset, r5, -32 */ 3097 0x84, 9, /* DW_CFA_offset, r4, -36 */ 3098 } 3099}; 3100 3101void tcg_register_jit(const void *buf, size_t buf_size) 3102{ 3103 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 3104} 3105