1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Andrzej Zaborowski 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25#include "elf.h" 26 27int arm_arch = __ARM_ARCH; 28 29#ifndef use_idiv_instructions 30bool use_idiv_instructions; 31#endif 32#ifndef use_neon_instructions 33bool use_neon_instructions; 34#endif 35 36/* Used for function call generation. */ 37#define TCG_TARGET_STACK_ALIGN 8 38#define TCG_TARGET_CALL_STACK_OFFSET 0 39#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL 40#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN 41#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN 42#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF 43 44#ifdef CONFIG_DEBUG_TCG 45static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 46 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 47 "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc", 48 "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7", 49 "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15", 50}; 51#endif 52 53static const int tcg_target_reg_alloc_order[] = { 54 TCG_REG_R4, 55 TCG_REG_R5, 56 TCG_REG_R6, 57 TCG_REG_R7, 58 TCG_REG_R8, 59 TCG_REG_R9, 60 TCG_REG_R10, 61 TCG_REG_R11, 62 TCG_REG_R13, 63 TCG_REG_R0, 64 TCG_REG_R1, 65 TCG_REG_R2, 66 TCG_REG_R3, 67 TCG_REG_R12, 68 TCG_REG_R14, 69 70 TCG_REG_Q0, 71 TCG_REG_Q1, 72 TCG_REG_Q2, 73 TCG_REG_Q3, 74 /* Q4 - Q7 are call-saved, and skipped. */ 75 TCG_REG_Q8, 76 TCG_REG_Q9, 77 TCG_REG_Q10, 78 TCG_REG_Q11, 79 TCG_REG_Q12, 80 TCG_REG_Q13, 81 TCG_REG_Q14, 82 TCG_REG_Q15, 83}; 84 85static const int tcg_target_call_iarg_regs[4] = { 86 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 87}; 88 89static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 90{ 91 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 92 tcg_debug_assert(slot >= 0 && slot <= 3); 93 return TCG_REG_R0 + slot; 94} 95 96#define TCG_REG_TMP TCG_REG_R12 97#define TCG_VEC_TMP TCG_REG_Q15 98#define TCG_REG_GUEST_BASE TCG_REG_R11 99 100typedef enum { 101 COND_EQ = 0x0, 102 COND_NE = 0x1, 103 COND_CS = 0x2, /* Unsigned greater or equal */ 104 COND_CC = 0x3, /* Unsigned less than */ 105 COND_MI = 0x4, /* Negative */ 106 COND_PL = 0x5, /* Zero or greater */ 107 COND_VS = 0x6, /* Overflow */ 108 COND_VC = 0x7, /* No overflow */ 109 COND_HI = 0x8, /* Unsigned greater than */ 110 COND_LS = 0x9, /* Unsigned less or equal */ 111 COND_GE = 0xa, 112 COND_LT = 0xb, 113 COND_GT = 0xc, 114 COND_LE = 0xd, 115 COND_AL = 0xe, 116} ARMCond; 117 118#define TO_CPSR (1 << 20) 119 120#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) 121#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) 122#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) 123#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) 124#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) 125#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) 126#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) 127#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) 128 129typedef enum { 130 ARITH_AND = 0x0 << 21, 131 ARITH_EOR = 0x1 << 21, 132 ARITH_SUB = 0x2 << 21, 133 ARITH_RSB = 0x3 << 21, 134 ARITH_ADD = 0x4 << 21, 135 ARITH_ADC = 0x5 << 21, 136 ARITH_SBC = 0x6 << 21, 137 ARITH_RSC = 0x7 << 21, 138 ARITH_TST = 0x8 << 21 | TO_CPSR, 139 ARITH_CMP = 0xa << 21 | TO_CPSR, 140 ARITH_CMN = 0xb << 21 | TO_CPSR, 141 ARITH_ORR = 0xc << 21, 142 ARITH_MOV = 0xd << 21, 143 ARITH_BIC = 0xe << 21, 144 ARITH_MVN = 0xf << 21, 145 146 INSN_B = 0x0a000000, 147 148 INSN_CLZ = 0x016f0f10, 149 INSN_RBIT = 0x06ff0f30, 150 151 INSN_LDMIA = 0x08b00000, 152 INSN_STMDB = 0x09200000, 153 154 INSN_LDR_IMM = 0x04100000, 155 INSN_LDR_REG = 0x06100000, 156 INSN_STR_IMM = 0x04000000, 157 INSN_STR_REG = 0x06000000, 158 159 INSN_LDRH_IMM = 0x005000b0, 160 INSN_LDRH_REG = 0x001000b0, 161 INSN_LDRSH_IMM = 0x005000f0, 162 INSN_LDRSH_REG = 0x001000f0, 163 INSN_STRH_IMM = 0x004000b0, 164 INSN_STRH_REG = 0x000000b0, 165 166 INSN_LDRB_IMM = 0x04500000, 167 INSN_LDRB_REG = 0x06500000, 168 INSN_LDRSB_IMM = 0x005000d0, 169 INSN_LDRSB_REG = 0x001000d0, 170 INSN_STRB_IMM = 0x04400000, 171 INSN_STRB_REG = 0x06400000, 172 173 INSN_LDRD_IMM = 0x004000d0, 174 INSN_LDRD_REG = 0x000000d0, 175 INSN_STRD_IMM = 0x004000f0, 176 INSN_STRD_REG = 0x000000f0, 177 178 INSN_DMB_ISH = 0xf57ff05b, 179 INSN_DMB_MCR = 0xee070fba, 180 181 /* Architected nop introduced in v6k. */ 182 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this 183 also Just So Happened to do nothing on pre-v6k so that we 184 don't need to conditionalize it? */ 185 INSN_NOP_v6k = 0xe320f000, 186 /* Otherwise the assembler uses mov r0,r0 */ 187 INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, 188 189 INSN_VADD = 0xf2000800, 190 INSN_VAND = 0xf2000110, 191 INSN_VBIC = 0xf2100110, 192 INSN_VEOR = 0xf3000110, 193 INSN_VORN = 0xf2300110, 194 INSN_VORR = 0xf2200110, 195 INSN_VSUB = 0xf3000800, 196 INSN_VMUL = 0xf2000910, 197 INSN_VQADD = 0xf2000010, 198 INSN_VQADD_U = 0xf3000010, 199 INSN_VQSUB = 0xf2000210, 200 INSN_VQSUB_U = 0xf3000210, 201 INSN_VMAX = 0xf2000600, 202 INSN_VMAX_U = 0xf3000600, 203 INSN_VMIN = 0xf2000610, 204 INSN_VMIN_U = 0xf3000610, 205 206 INSN_VABS = 0xf3b10300, 207 INSN_VMVN = 0xf3b00580, 208 INSN_VNEG = 0xf3b10380, 209 210 INSN_VCEQ0 = 0xf3b10100, 211 INSN_VCGT0 = 0xf3b10000, 212 INSN_VCGE0 = 0xf3b10080, 213 INSN_VCLE0 = 0xf3b10180, 214 INSN_VCLT0 = 0xf3b10200, 215 216 INSN_VCEQ = 0xf3000810, 217 INSN_VCGE = 0xf2000310, 218 INSN_VCGT = 0xf2000300, 219 INSN_VCGE_U = 0xf3000310, 220 INSN_VCGT_U = 0xf3000300, 221 222 INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ 223 INSN_VSARI = 0xf2800010, /* VSHR.S */ 224 INSN_VSHRI = 0xf3800010, /* VSHR.U */ 225 INSN_VSLI = 0xf3800510, 226 INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ 227 INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ 228 229 INSN_VBSL = 0xf3100110, 230 INSN_VBIT = 0xf3200110, 231 INSN_VBIF = 0xf3300110, 232 233 INSN_VTST = 0xf2000810, 234 235 INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ 236 INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ 237 INSN_VLDR_D = 0xed100b00, /* VLDR.64 */ 238 INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ 239 INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */ 240 INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ 241 INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */ 242} ARMInsn; 243 244#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) 245 246static const uint8_t tcg_cond_to_arm_cond[] = { 247 [TCG_COND_EQ] = COND_EQ, 248 [TCG_COND_NE] = COND_NE, 249 [TCG_COND_LT] = COND_LT, 250 [TCG_COND_GE] = COND_GE, 251 [TCG_COND_LE] = COND_LE, 252 [TCG_COND_GT] = COND_GT, 253 /* unsigned */ 254 [TCG_COND_LTU] = COND_CC, 255 [TCG_COND_GEU] = COND_CS, 256 [TCG_COND_LEU] = COND_LS, 257 [TCG_COND_GTU] = COND_HI, 258}; 259 260static int encode_imm(uint32_t imm); 261 262/* TCG private relocation type: add with pc+imm8 */ 263#define R_ARM_PC8 11 264 265/* TCG private relocation type: vldr with imm8 << 2 */ 266#define R_ARM_PC11 12 267 268static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 269{ 270 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 271 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2; 272 273 if (offset == sextract32(offset, 0, 24)) { 274 *src_rw = deposit32(*src_rw, 0, 24, offset); 275 return true; 276 } 277 return false; 278} 279 280static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 281{ 282 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 283 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 284 285 if (offset >= -0xfff && offset <= 0xfff) { 286 tcg_insn_unit insn = *src_rw; 287 bool u = (offset >= 0); 288 if (!u) { 289 offset = -offset; 290 } 291 insn = deposit32(insn, 23, 1, u); 292 insn = deposit32(insn, 0, 12, offset); 293 *src_rw = insn; 294 return true; 295 } 296 return false; 297} 298 299static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 300{ 301 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 302 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4; 303 304 if (offset >= -0xff && offset <= 0xff) { 305 tcg_insn_unit insn = *src_rw; 306 bool u = (offset >= 0); 307 if (!u) { 308 offset = -offset; 309 } 310 insn = deposit32(insn, 23, 1, u); 311 insn = deposit32(insn, 0, 8, offset); 312 *src_rw = insn; 313 return true; 314 } 315 return false; 316} 317 318static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 319{ 320 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 321 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 322 int imm12 = encode_imm(offset); 323 324 if (imm12 >= 0) { 325 *src_rw = deposit32(*src_rw, 0, 12, imm12); 326 return true; 327 } 328 return false; 329} 330 331static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 332 intptr_t value, intptr_t addend) 333{ 334 tcg_debug_assert(addend == 0); 335 switch (type) { 336 case R_ARM_PC24: 337 return reloc_pc24(code_ptr, (const tcg_insn_unit *)value); 338 case R_ARM_PC13: 339 return reloc_pc13(code_ptr, (const tcg_insn_unit *)value); 340 case R_ARM_PC11: 341 return reloc_pc11(code_ptr, (const tcg_insn_unit *)value); 342 case R_ARM_PC8: 343 return reloc_pc8(code_ptr, (const tcg_insn_unit *)value); 344 default: 345 g_assert_not_reached(); 346 } 347} 348 349#define TCG_CT_CONST_ARM 0x100 350#define TCG_CT_CONST_INV 0x200 351#define TCG_CT_CONST_NEG 0x400 352#define TCG_CT_CONST_ZERO 0x800 353#define TCG_CT_CONST_ORRI 0x1000 354#define TCG_CT_CONST_ANDI 0x2000 355 356#define ALL_GENERAL_REGS 0xffffu 357#define ALL_VECTOR_REGS 0xffff0000u 358 359/* 360 * r0-r3 will be overwritten when reading the tlb entry (system-mode only); 361 * r14 will be overwritten by the BLNE branching to the slow path. 362 */ 363#define ALL_QLDST_REGS \ 364 (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14))) 365 366/* 367 * ARM immediates for ALU instructions are made of an unsigned 8-bit 368 * right-rotated by an even amount between 0 and 30. 369 * 370 * Return < 0 if @imm cannot be encoded, else the entire imm12 field. 371 */ 372static int encode_imm(uint32_t imm) 373{ 374 uint32_t rot, imm8; 375 376 /* Simple case, no rotation required. */ 377 if ((imm & ~0xff) == 0) { 378 return imm; 379 } 380 381 /* Next, try a simple even shift. */ 382 rot = ctz32(imm) & ~1; 383 imm8 = imm >> rot; 384 rot = 32 - rot; 385 if ((imm8 & ~0xff) == 0) { 386 goto found; 387 } 388 389 /* 390 * Finally, try harder with rotations. 391 * The ctz test above will have taken care of rotates >= 8. 392 */ 393 for (rot = 2; rot < 8; rot += 2) { 394 imm8 = rol32(imm, rot); 395 if ((imm8 & ~0xff) == 0) { 396 goto found; 397 } 398 } 399 /* Fail: imm cannot be encoded. */ 400 return -1; 401 402 found: 403 /* Note that rot is even, and we discard bit 0 by shifting by 7. */ 404 return rot << 7 | imm8; 405} 406 407static int encode_imm_nofail(uint32_t imm) 408{ 409 int ret = encode_imm(imm); 410 tcg_debug_assert(ret >= 0); 411 return ret; 412} 413 414static bool check_fit_imm(uint32_t imm) 415{ 416 return encode_imm(imm) >= 0; 417} 418 419/* Return true if v16 is a valid 16-bit shifted immediate. */ 420static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) 421{ 422 if (v16 == (v16 & 0xff)) { 423 *cmode = 0x8; 424 *imm8 = v16 & 0xff; 425 return true; 426 } else if (v16 == (v16 & 0xff00)) { 427 *cmode = 0xa; 428 *imm8 = v16 >> 8; 429 return true; 430 } 431 return false; 432} 433 434/* Return true if v32 is a valid 32-bit shifted immediate. */ 435static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) 436{ 437 if (v32 == (v32 & 0xff)) { 438 *cmode = 0x0; 439 *imm8 = v32 & 0xff; 440 return true; 441 } else if (v32 == (v32 & 0xff00)) { 442 *cmode = 0x2; 443 *imm8 = (v32 >> 8) & 0xff; 444 return true; 445 } else if (v32 == (v32 & 0xff0000)) { 446 *cmode = 0x4; 447 *imm8 = (v32 >> 16) & 0xff; 448 return true; 449 } else if (v32 == (v32 & 0xff000000)) { 450 *cmode = 0x6; 451 *imm8 = v32 >> 24; 452 return true; 453 } 454 return false; 455} 456 457/* Return true if v32 is a valid 32-bit shifting ones immediate. */ 458static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) 459{ 460 if ((v32 & 0xffff00ff) == 0xff) { 461 *cmode = 0xc; 462 *imm8 = (v32 >> 8) & 0xff; 463 return true; 464 } else if ((v32 & 0xff00ffff) == 0xffff) { 465 *cmode = 0xd; 466 *imm8 = (v32 >> 16) & 0xff; 467 return true; 468 } 469 return false; 470} 471 472/* 473 * Return non-zero if v32 can be formed by MOVI+ORR. 474 * Place the parameters for MOVI in (cmode, imm8). 475 * Return the cmode for ORR; the imm8 can be had via extraction from v32. 476 */ 477static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) 478{ 479 int i; 480 481 for (i = 6; i > 0; i -= 2) { 482 /* Mask out one byte we can add with ORR. */ 483 uint32_t tmp = v32 & ~(0xffu << (i * 4)); 484 if (is_shimm32(tmp, cmode, imm8) || 485 is_soimm32(tmp, cmode, imm8)) { 486 break; 487 } 488 } 489 return i; 490} 491 492/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ 493static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) 494{ 495 if (v32 == deposit32(v32, 16, 16, v32)) { 496 return is_shimm16(v32, cmode, imm8); 497 } else { 498 return is_shimm32(v32, cmode, imm8); 499 } 500} 501 502/* Test if a constant matches the constraint. 503 * TODO: define constraints for: 504 * 505 * ldr/str offset: between -0xfff and 0xfff 506 * ldrh/strh offset: between -0xff and 0xff 507 * mov operand2: values represented with x << (2 * y), x < 0x100 508 * add, sub, eor...: ditto 509 */ 510static bool tcg_target_const_match(int64_t val, int ct, 511 TCGType type, TCGCond cond, int vece) 512{ 513 if (ct & TCG_CT_CONST) { 514 return 1; 515 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { 516 return 1; 517 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) { 518 return 1; 519 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) { 520 return 1; 521 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 522 return 1; 523 } 524 525 switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { 526 case 0: 527 break; 528 case TCG_CT_CONST_ANDI: 529 val = ~val; 530 /* fallthru */ 531 case TCG_CT_CONST_ORRI: 532 if (val == deposit64(val, 32, 32, val)) { 533 int cmode, imm8; 534 return is_shimm1632(val, &cmode, &imm8); 535 } 536 break; 537 default: 538 /* Both bits should not be set for the same insn. */ 539 g_assert_not_reached(); 540 } 541 542 return 0; 543} 544 545static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset) 546{ 547 tcg_out32(s, (cond << 28) | INSN_B | 548 (((offset - 8) >> 2) & 0x00ffffff)); 549} 550 551static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset) 552{ 553 tcg_out32(s, (cond << 28) | 0x0b000000 | 554 (((offset - 8) >> 2) & 0x00ffffff)); 555} 556 557static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 558{ 559 tcg_out32(s, (cond << 28) | 0x012fff30 | rn); 560} 561 562static void tcg_out_blx_imm(TCGContext *s, int32_t offset) 563{ 564 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | 565 (((offset - 8) >> 2) & 0x00ffffff)); 566} 567 568static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc, 569 TCGReg rd, TCGReg rn, TCGReg rm, int shift) 570{ 571 tcg_out32(s, (cond << 28) | (0 << 25) | opc | 572 (rn << 16) | (rd << 12) | shift | rm); 573} 574 575static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm) 576{ 577 /* Simple reg-reg move, optimising out the 'do nothing' case */ 578 if (rd != rm) { 579 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0)); 580 } 581} 582 583static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 584{ 585 tcg_out32(s, (cond << 28) | 0x012fff10 | rn); 586} 587 588static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) 589{ 590 /* 591 * Unless the C portion of QEMU is compiled as thumb, we don't need 592 * true BX semantics; merely a branch to an address held in a register. 593 */ 594 tcg_out_bx_reg(s, cond, rn); 595} 596 597static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, 598 TCGReg rd, TCGReg rn, int im) 599{ 600 tcg_out32(s, (cond << 28) | (1 << 25) | opc | 601 (rn << 16) | (rd << 12) | im); 602} 603 604static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, 605 TCGReg rn, uint16_t mask) 606{ 607 tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); 608} 609 610/* Note that this routine is used for both LDR and LDRH formats, so we do 611 not wish to include an immediate shift at this point. */ 612static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 613 TCGReg rn, TCGReg rm, bool u, bool p, bool w) 614{ 615 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) 616 | (w << 21) | (rn << 16) | (rt << 12) | rm); 617} 618 619static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 620 TCGReg rn, int imm8, bool p, bool w) 621{ 622 bool u = 1; 623 if (imm8 < 0) { 624 imm8 = -imm8; 625 u = 0; 626 } 627 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 628 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); 629} 630 631static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, 632 TCGReg rt, TCGReg rn, int imm12, bool p, bool w) 633{ 634 bool u = 1; 635 if (imm12 < 0) { 636 imm12 = -imm12; 637 u = 0; 638 } 639 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 640 (rn << 16) | (rt << 12) | imm12); 641} 642 643static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, 644 TCGReg rn, int imm12) 645{ 646 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); 647} 648 649static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, 650 TCGReg rn, int imm12) 651{ 652 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); 653} 654 655static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, 656 TCGReg rn, TCGReg rm) 657{ 658 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); 659} 660 661static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, 662 TCGReg rn, TCGReg rm) 663{ 664 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); 665} 666 667static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, 668 TCGReg rn, int imm8) 669{ 670 tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); 671} 672 673static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, 674 TCGReg rn, TCGReg rm) 675{ 676 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); 677} 678 679static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, 680 TCGReg rn, int imm8) 681{ 682 tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); 683} 684 685static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, 686 TCGReg rn, TCGReg rm) 687{ 688 tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); 689} 690 691/* Register pre-increment with base writeback. */ 692static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 693 TCGReg rn, TCGReg rm) 694{ 695 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); 696} 697 698static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 699 TCGReg rn, TCGReg rm) 700{ 701 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); 702} 703 704static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt, 705 TCGReg rn, int imm8) 706{ 707 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); 708} 709 710static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt, 711 TCGReg rn, int imm8) 712{ 713 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); 714} 715 716static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt, 717 TCGReg rn, TCGReg rm) 718{ 719 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); 720} 721 722static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt, 723 TCGReg rn, TCGReg rm) 724{ 725 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); 726} 727 728static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt, 729 TCGReg rn, int imm8) 730{ 731 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); 732} 733 734static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt, 735 TCGReg rn, TCGReg rm) 736{ 737 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); 738} 739 740static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt, 741 TCGReg rn, int imm12) 742{ 743 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); 744} 745 746static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt, 747 TCGReg rn, int imm12) 748{ 749 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); 750} 751 752static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt, 753 TCGReg rn, TCGReg rm) 754{ 755 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); 756} 757 758static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt, 759 TCGReg rn, TCGReg rm) 760{ 761 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); 762} 763 764static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt, 765 TCGReg rn, int imm8) 766{ 767 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); 768} 769 770static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt, 771 TCGReg rn, TCGReg rm) 772{ 773 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); 774} 775 776static void tcg_out_movi_pool(TCGContext *s, ARMCond cond, 777 TCGReg rd, uint32_t arg) 778{ 779 new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); 780 tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); 781} 782 783static void tcg_out_movi32(TCGContext *s, ARMCond cond, 784 TCGReg rd, uint32_t arg) 785{ 786 int imm12, diff, opc, sh1, sh2; 787 uint32_t tt0, tt1, tt2; 788 789 /* Check a single MOV/MVN before anything else. */ 790 imm12 = encode_imm(arg); 791 if (imm12 >= 0) { 792 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12); 793 return; 794 } 795 imm12 = encode_imm(~arg); 796 if (imm12 >= 0) { 797 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12); 798 return; 799 } 800 801 /* Check for a pc-relative address. This will usually be the TB, 802 or within the TB, which is immediately before the code block. */ 803 diff = tcg_pcrel_diff(s, (void *)arg) - 8; 804 if (diff >= 0) { 805 imm12 = encode_imm(diff); 806 if (imm12 >= 0) { 807 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12); 808 return; 809 } 810 } else { 811 imm12 = encode_imm(-diff); 812 if (imm12 >= 0) { 813 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12); 814 return; 815 } 816 } 817 818 /* Use movw + movt. */ 819 if (use_armv7_instructions) { 820 /* movw */ 821 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12) 822 | ((arg << 4) & 0x000f0000) | (arg & 0xfff)); 823 if (arg & 0xffff0000) { 824 /* movt */ 825 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12) 826 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff)); 827 } 828 return; 829 } 830 831 /* Look for sequences of two insns. If we have lots of 1's, we can 832 shorten the sequence by beginning with mvn and then clearing 833 higher bits with eor. */ 834 tt0 = arg; 835 opc = ARITH_MOV; 836 if (ctpop32(arg) > 16) { 837 tt0 = ~arg; 838 opc = ARITH_MVN; 839 } 840 sh1 = ctz32(tt0) & ~1; 841 tt1 = tt0 & ~(0xff << sh1); 842 sh2 = ctz32(tt1) & ~1; 843 tt2 = tt1 & ~(0xff << sh2); 844 if (tt2 == 0) { 845 int rot; 846 847 rot = ((32 - sh1) << 7) & 0xf00; 848 tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); 849 rot = ((32 - sh2) << 7) & 0xf00; 850 tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, 851 ((tt0 >> sh2) & 0xff) | rot); 852 return; 853 } 854 855 /* Otherwise, drop it into the constant pool. */ 856 tcg_out_movi_pool(s, cond, rd, arg); 857} 858 859/* 860 * Emit either the reg,imm or reg,reg form of a data-processing insn. 861 * rhs must satisfy the "rI" constraint. 862 */ 863static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, 864 TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const) 865{ 866 if (rhs_is_const) { 867 tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs)); 868 } else { 869 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 870 } 871} 872 873/* 874 * Emit either the reg,imm or reg,reg form of a data-processing insn. 875 * rhs must satisfy the "rIK" constraint. 876 */ 877static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc, 878 ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs) 879{ 880 int imm12 = encode_imm(rhs); 881 if (imm12 < 0) { 882 imm12 = encode_imm_nofail(~rhs); 883 opc = opinv; 884 } 885 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 886} 887 888static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, 889 ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, 890 bool rhs_is_const) 891{ 892 if (rhs_is_const) { 893 tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs); 894 } else { 895 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 896 } 897} 898 899static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc, 900 ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs) 901{ 902 int imm12 = encode_imm(rhs); 903 if (imm12 < 0) { 904 imm12 = encode_imm_nofail(-rhs); 905 opc = opneg; 906 } 907 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 908} 909 910static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, 911 ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, 912 bool rhs_is_const) 913{ 914 /* Emit either the reg,imm or reg,reg form of a data-processing insn. 915 * rhs must satisfy the "rIN" constraint. 916 */ 917 if (rhs_is_const) { 918 tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs); 919 } else { 920 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 921 } 922} 923 924static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 925{ 926 /* sxtb */ 927 tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn); 928} 929 930static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) 931{ 932 tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff); 933} 934 935static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 936{ 937 /* sxth */ 938 tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn); 939} 940 941static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) 942{ 943 /* uxth */ 944 tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn); 945} 946 947static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) 948{ 949 g_assert_not_reached(); 950} 951 952static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) 953{ 954 g_assert_not_reached(); 955} 956 957static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 958{ 959 g_assert_not_reached(); 960} 961 962static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 963{ 964 g_assert_not_reached(); 965} 966 967static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) 968{ 969 g_assert_not_reached(); 970} 971 972static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, 973 TCGReg a2, unsigned ofs, unsigned len) 974{ 975 /* bfi/bfc */ 976 tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a1 977 | (ofs << 7) | ((ofs + len - 1) << 16)); 978} 979 980static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, 981 tcg_target_long a2, unsigned ofs, unsigned len) 982{ 983 /* bfi becomes bfc with rn == 15. */ 984 tgen_deposit(s, type, a0, a1, 15, ofs, len); 985} 986 987static const TCGOutOpDeposit outop_deposit = { 988 .base.static_constraint = C_O1_I2(r, 0, rZ), 989 .out_rrr = tgen_deposit, 990 .out_rri = tgen_depositi, 991}; 992 993static void tgen_extract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, 994 unsigned ofs, unsigned len) 995{ 996 /* According to gcc, AND can be faster. */ 997 if (ofs == 0 && len <= 8) { 998 tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 999 encode_imm_nofail((1 << len) - 1)); 1000 return; 1001 } 1002 1003 if (use_armv7_instructions) { 1004 /* ubfx */ 1005 tcg_out32(s, 0x07e00050 | (COND_AL << 28) | (rd << 12) | rn 1006 | (ofs << 7) | ((len - 1) << 16)); 1007 return; 1008 } 1009 1010 assert(ofs % 8 == 0); 1011 switch (len) { 1012 case 8: 1013 /* uxtb */ 1014 tcg_out32(s, 0x06ef0070 | (COND_AL << 28) | 1015 (rd << 12) | (ofs << 7) | rn); 1016 break; 1017 case 16: 1018 /* uxth */ 1019 tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | 1020 (rd << 12) | (ofs << 7) | rn); 1021 break; 1022 default: 1023 g_assert_not_reached(); 1024 } 1025} 1026 1027static const TCGOutOpExtract outop_extract = { 1028 .base.static_constraint = C_O1_I1(r, r), 1029 .out_rr = tgen_extract, 1030}; 1031 1032static void tgen_sextract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, 1033 unsigned ofs, unsigned len) 1034{ 1035 if (use_armv7_instructions) { 1036 /* sbfx */ 1037 tcg_out32(s, 0x07a00050 | (COND_AL << 28) | (rd << 12) | rn 1038 | (ofs << 7) | ((len - 1) << 16)); 1039 return; 1040 } 1041 1042 assert(ofs % 8 == 0); 1043 switch (len) { 1044 case 8: 1045 /* sxtb */ 1046 tcg_out32(s, 0x06af0070 | (COND_AL << 28) | 1047 (rd << 12) | (ofs << 7) | rn); 1048 break; 1049 case 16: 1050 /* sxth */ 1051 tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | 1052 (rd << 12) | (ofs << 7) | rn); 1053 break; 1054 default: 1055 g_assert_not_reached(); 1056 } 1057} 1058 1059static const TCGOutOpExtract outop_sextract = { 1060 .base.static_constraint = C_O1_I1(r, r), 1061 .out_rr = tgen_sextract, 1062}; 1063 1064 1065static void tcg_out_ld32u(TCGContext *s, ARMCond cond, 1066 TCGReg rd, TCGReg rn, int32_t offset) 1067{ 1068 if (offset > 0xfff || offset < -0xfff) { 1069 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1070 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP); 1071 } else 1072 tcg_out_ld32_12(s, cond, rd, rn, offset); 1073} 1074 1075static void tcg_out_st32(TCGContext *s, ARMCond cond, 1076 TCGReg rd, TCGReg rn, int32_t offset) 1077{ 1078 if (offset > 0xfff || offset < -0xfff) { 1079 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1080 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP); 1081 } else 1082 tcg_out_st32_12(s, cond, rd, rn, offset); 1083} 1084 1085static void tcg_out_ld16u(TCGContext *s, ARMCond cond, 1086 TCGReg rd, TCGReg rn, int32_t offset) 1087{ 1088 if (offset > 0xff || offset < -0xff) { 1089 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1090 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); 1091 } else 1092 tcg_out_ld16u_8(s, cond, rd, rn, offset); 1093} 1094 1095static void tcg_out_ld16s(TCGContext *s, ARMCond cond, 1096 TCGReg rd, TCGReg rn, int32_t offset) 1097{ 1098 if (offset > 0xff || offset < -0xff) { 1099 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1100 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); 1101 } else 1102 tcg_out_ld16s_8(s, cond, rd, rn, offset); 1103} 1104 1105static void tcg_out_st16(TCGContext *s, ARMCond cond, 1106 TCGReg rd, TCGReg rn, int32_t offset) 1107{ 1108 if (offset > 0xff || offset < -0xff) { 1109 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1110 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); 1111 } else 1112 tcg_out_st16_8(s, cond, rd, rn, offset); 1113} 1114 1115static void tcg_out_ld8u(TCGContext *s, ARMCond cond, 1116 TCGReg rd, TCGReg rn, int32_t offset) 1117{ 1118 if (offset > 0xfff || offset < -0xfff) { 1119 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1120 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); 1121 } else 1122 tcg_out_ld8_12(s, cond, rd, rn, offset); 1123} 1124 1125static void tcg_out_ld8s(TCGContext *s, ARMCond cond, 1126 TCGReg rd, TCGReg rn, int32_t offset) 1127{ 1128 if (offset > 0xff || offset < -0xff) { 1129 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1130 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); 1131 } else 1132 tcg_out_ld8s_8(s, cond, rd, rn, offset); 1133} 1134 1135static void tcg_out_st8(TCGContext *s, ARMCond cond, 1136 TCGReg rd, TCGReg rn, int32_t offset) 1137{ 1138 if (offset > 0xfff || offset < -0xfff) { 1139 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1140 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); 1141 } else 1142 tcg_out_st8_12(s, cond, rd, rn, offset); 1143} 1144 1145/* 1146 * The _goto case is normally between TBs within the same code buffer, and 1147 * with the code buffer limited to 16MB we wouldn't need the long case. 1148 * But we also use it for the tail-call to the qemu_ld/st helpers, which does. 1149 */ 1150static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr) 1151{ 1152 intptr_t addri = (intptr_t)addr; 1153 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1154 bool arm_mode = !(addri & 1); 1155 1156 if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { 1157 tcg_out_b_imm(s, cond, disp); 1158 return; 1159 } 1160 1161 /* LDR is interworking from v5t. */ 1162 tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); 1163} 1164 1165/* 1166 * The call case is mostly used for helpers - so it's not unreasonable 1167 * for them to be beyond branch range. 1168 */ 1169static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr) 1170{ 1171 intptr_t addri = (intptr_t)addr; 1172 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1173 bool arm_mode = !(addri & 1); 1174 1175 if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { 1176 if (arm_mode) { 1177 tcg_out_bl_imm(s, COND_AL, disp); 1178 } else { 1179 tcg_out_blx_imm(s, disp); 1180 } 1181 return; 1182 } 1183 1184 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); 1185 tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); 1186} 1187 1188static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr, 1189 const TCGHelperInfo *info) 1190{ 1191 tcg_out_call_int(s, addr); 1192} 1193 1194static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) 1195{ 1196 if (l->has_value) { 1197 tcg_out_goto(s, cond, l->u.value_ptr); 1198 } else { 1199 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); 1200 tcg_out_b_imm(s, cond, 0); 1201 } 1202} 1203 1204static void tcg_out_mb(TCGContext *s, TCGArg a0) 1205{ 1206 if (use_armv7_instructions) { 1207 tcg_out32(s, INSN_DMB_ISH); 1208 } else { 1209 tcg_out32(s, INSN_DMB_MCR); 1210 } 1211} 1212 1213static TCGCond tgen_cmp(TCGContext *s, TCGCond cond, TCGReg a, TCGReg b) 1214{ 1215 if (is_tst_cond(cond)) { 1216 tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0)); 1217 return tcg_tst_eqne_cond(cond); 1218 } 1219 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, a, b, SHIFT_IMM_LSL(0)); 1220 return cond; 1221} 1222 1223static TCGCond tgen_cmpi(TCGContext *s, TCGCond cond, TCGReg a, TCGArg b) 1224{ 1225 int imm12; 1226 1227 if (!is_tst_cond(cond)) { 1228 tcg_out_dat_IN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b); 1229 return cond; 1230 } 1231 1232 /* 1233 * The compare constraints allow rIN, but TST does not support N. 1234 * Be prepared to load the constant into a scratch register. 1235 */ 1236 imm12 = encode_imm(b); 1237 if (imm12 >= 0) { 1238 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12); 1239 } else { 1240 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b); 1241 tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, 1242 a, TCG_REG_TMP, SHIFT_IMM_LSL(0)); 1243 } 1244 return tcg_tst_eqne_cond(cond); 1245} 1246 1247static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a, 1248 TCGArg b, int b_const) 1249{ 1250 if (b_const) { 1251 return tgen_cmpi(s, cond, a, b); 1252 } else { 1253 return tgen_cmp(s, cond, a, b); 1254 } 1255} 1256 1257static TCGCond tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, 1258 TCGArg bl, bool const_bl, TCGArg bh, bool const_bh) 1259{ 1260 switch (cond) { 1261 case TCG_COND_EQ: 1262 case TCG_COND_NE: 1263 case TCG_COND_LTU: 1264 case TCG_COND_LEU: 1265 case TCG_COND_GTU: 1266 case TCG_COND_GEU: 1267 /* 1268 * We perform a conditional comparison. If the high half is 1269 * equal, then overwrite the flags with the comparison of the 1270 * low half. The resulting flags cover the whole. 1271 */ 1272 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh); 1273 tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); 1274 return cond; 1275 1276 case TCG_COND_TSTEQ: 1277 case TCG_COND_TSTNE: 1278 /* Similar, but with TST instead of CMP. */ 1279 tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh); 1280 tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl); 1281 return tcg_tst_eqne_cond(cond); 1282 1283 case TCG_COND_LT: 1284 case TCG_COND_GE: 1285 /* We perform a double-word subtraction and examine the result. 1286 We do not actually need the result of the subtract, so the 1287 low part "subtract" is a compare. For the high half we have 1288 no choice but to compute into a temporary. */ 1289 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl); 1290 tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR, 1291 TCG_REG_TMP, ah, bh, const_bh); 1292 return cond; 1293 1294 case TCG_COND_LE: 1295 case TCG_COND_GT: 1296 /* Similar, but with swapped arguments, via reversed subtract. */ 1297 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, 1298 TCG_REG_TMP, al, bl, const_bl); 1299 tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, 1300 TCG_REG_TMP, ah, bh, const_bh); 1301 return tcg_swap_cond(cond); 1302 1303 default: 1304 g_assert_not_reached(); 1305 } 1306} 1307 1308/* 1309 * Note that TCGReg references Q-registers. 1310 * Q-regno = 2 * D-regno, so shift left by 1 while inserting. 1311 */ 1312static uint32_t encode_vd(TCGReg rd) 1313{ 1314 tcg_debug_assert(rd >= TCG_REG_Q0); 1315 return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); 1316} 1317 1318static uint32_t encode_vn(TCGReg rn) 1319{ 1320 tcg_debug_assert(rn >= TCG_REG_Q0); 1321 return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17); 1322} 1323 1324static uint32_t encode_vm(TCGReg rm) 1325{ 1326 tcg_debug_assert(rm >= TCG_REG_Q0); 1327 return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); 1328} 1329 1330static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece, 1331 TCGReg d, TCGReg m) 1332{ 1333 tcg_out32(s, insn | (vece << 18) | (q << 6) | 1334 encode_vd(d) | encode_vm(m)); 1335} 1336 1337static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, 1338 TCGReg d, TCGReg n, TCGReg m) 1339{ 1340 tcg_out32(s, insn | (vece << 20) | (q << 6) | 1341 encode_vd(d) | encode_vn(n) | encode_vm(m)); 1342} 1343 1344static void tcg_out_vmovi(TCGContext *s, TCGReg rd, 1345 int q, int op, int cmode, uint8_t imm8) 1346{ 1347 tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5) 1348 | (cmode << 8) | extract32(imm8, 0, 4) 1349 | (extract32(imm8, 4, 3) << 16) 1350 | (extract32(imm8, 7, 1) << 24)); 1351} 1352 1353static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q, 1354 TCGReg rd, TCGReg rm, int l_imm6) 1355{ 1356 tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) | 1357 (extract32(l_imm6, 6, 1) << 7) | 1358 (extract32(l_imm6, 0, 6) << 16)); 1359} 1360 1361static void tcg_out_vldst(TCGContext *s, ARMInsn insn, 1362 TCGReg rd, TCGReg rn, int offset) 1363{ 1364 if (offset != 0) { 1365 if (check_fit_imm(offset) || check_fit_imm(-offset)) { 1366 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1367 TCG_REG_TMP, rn, offset, true); 1368 } else { 1369 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); 1370 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1371 TCG_REG_TMP, TCG_REG_TMP, rn, 0); 1372 } 1373 rn = TCG_REG_TMP; 1374 } 1375 tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); 1376} 1377 1378typedef struct { 1379 ARMCond cond; 1380 TCGReg base; 1381 int index; 1382 bool index_scratch; 1383 TCGAtomAlign aa; 1384} HostAddress; 1385 1386bool tcg_target_has_memory_bswap(MemOp memop) 1387{ 1388 return false; 1389} 1390 1391static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) 1392{ 1393 /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */ 1394 return TCG_REG_R14; 1395} 1396 1397static const TCGLdstHelperParam ldst_helper_param = { 1398 .ra_gen = ldst_ra_gen, 1399 .ntmp = 1, 1400 .tmp = { TCG_REG_TMP }, 1401}; 1402 1403static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1404{ 1405 MemOp opc = get_memop(lb->oi); 1406 1407 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1408 return false; 1409 } 1410 1411 tcg_out_ld_helper_args(s, lb, &ldst_helper_param); 1412 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); 1413 tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); 1414 1415 tcg_out_goto(s, COND_AL, lb->raddr); 1416 return true; 1417} 1418 1419static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1420{ 1421 MemOp opc = get_memop(lb->oi); 1422 1423 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1424 return false; 1425 } 1426 1427 tcg_out_st_helper_args(s, lb, &ldst_helper_param); 1428 1429 /* Tail-call to the helper, which will return to the fast path. */ 1430 tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); 1431 return true; 1432} 1433 1434/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ 1435#define MIN_TLB_MASK_TABLE_OFS -256 1436 1437static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 1438 TCGReg addr, MemOpIdx oi, bool is_ld) 1439{ 1440 TCGLabelQemuLdst *ldst = NULL; 1441 MemOp opc = get_memop(oi); 1442 unsigned a_mask; 1443 1444 if (tcg_use_softmmu) { 1445 *h = (HostAddress){ 1446 .cond = COND_AL, 1447 .base = addr, 1448 .index = TCG_REG_R1, 1449 .index_scratch = true, 1450 }; 1451 } else { 1452 *h = (HostAddress){ 1453 .cond = COND_AL, 1454 .base = addr, 1455 .index = guest_base ? TCG_REG_GUEST_BASE : -1, 1456 .index_scratch = false, 1457 }; 1458 } 1459 1460 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 1461 a_mask = (1 << h->aa.align) - 1; 1462 1463 if (tcg_use_softmmu) { 1464 int mem_index = get_mmuidx(oi); 1465 int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) 1466 : offsetof(CPUTLBEntry, addr_write); 1467 int fast_off = tlb_mask_table_ofs(s, mem_index); 1468 unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; 1469 TCGReg t_addr; 1470 1471 ldst = new_ldst_label(s); 1472 ldst->is_ld = is_ld; 1473 ldst->oi = oi; 1474 ldst->addr_reg = addr; 1475 1476 /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ 1477 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1478 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); 1479 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); 1480 1481 /* Extract the tlb index from the address into R0. */ 1482 tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr, 1483 SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); 1484 1485 /* 1486 * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1487 * Load the tlb comparator into R2 and the fast path addend into R1. 1488 */ 1489 if (cmp_off == 0) { 1490 tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1491 } else { 1492 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1493 TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); 1494 tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1495 } 1496 1497 /* Load the tlb addend. */ 1498 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, 1499 offsetof(CPUTLBEntry, addend)); 1500 1501 /* 1502 * Check alignment, check comparators. 1503 * Do this in 2-4 insns. Use MOVW for v7, if possible, 1504 * to reduce the number of sequential conditional instructions. 1505 * Almost all guests have at least 4k pages, which means that we need 1506 * to clear at least 9 bits even for an 8-byte memory, which means it 1507 * isn't worth checking for an immediate operand for BIC. 1508 * 1509 * For unaligned accesses, test the page of the last unit of alignment. 1510 * This leaves the least significant alignment bits unchanged, and of 1511 * course must be zero. 1512 */ 1513 t_addr = addr; 1514 if (a_mask < s_mask) { 1515 t_addr = TCG_REG_R0; 1516 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, 1517 addr, s_mask - a_mask); 1518 } 1519 if (use_armv7_instructions && s->page_bits <= 16) { 1520 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); 1521 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1522 t_addr, TCG_REG_TMP, 0); 1523 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, 1524 TCG_REG_R2, TCG_REG_TMP, 0); 1525 } else { 1526 if (a_mask) { 1527 tcg_debug_assert(a_mask <= 0xff); 1528 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask); 1529 } 1530 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, 1531 SHIFT_IMM_LSR(s->page_bits)); 1532 tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 1533 0, TCG_REG_R2, TCG_REG_TMP, 1534 SHIFT_IMM_LSL(s->page_bits)); 1535 } 1536 } else if (a_mask) { 1537 ldst = new_ldst_label(s); 1538 ldst->is_ld = is_ld; 1539 ldst->oi = oi; 1540 ldst->addr_reg = addr; 1541 1542 /* We are expecting alignment to max out at 7 */ 1543 tcg_debug_assert(a_mask <= 0xff); 1544 /* tst addr, #mask */ 1545 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask); 1546 } 1547 1548 return ldst; 1549} 1550 1551static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1552 TCGReg datahi, HostAddress h) 1553{ 1554 TCGReg base; 1555 1556 /* Byte swapping is left to middle-end expansion. */ 1557 tcg_debug_assert((opc & MO_BSWAP) == 0); 1558 1559 switch (opc & MO_SSIZE) { 1560 case MO_UB: 1561 if (h.index < 0) { 1562 tcg_out_ld8_12(s, h.cond, datalo, h.base, 0); 1563 } else { 1564 tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index); 1565 } 1566 break; 1567 case MO_SB: 1568 if (h.index < 0) { 1569 tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0); 1570 } else { 1571 tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index); 1572 } 1573 break; 1574 case MO_UW: 1575 if (h.index < 0) { 1576 tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0); 1577 } else { 1578 tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index); 1579 } 1580 break; 1581 case MO_SW: 1582 if (h.index < 0) { 1583 tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0); 1584 } else { 1585 tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index); 1586 } 1587 break; 1588 case MO_UL: 1589 if (h.index < 0) { 1590 tcg_out_ld32_12(s, h.cond, datalo, h.base, 0); 1591 } else { 1592 tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index); 1593 } 1594 break; 1595 case MO_UQ: 1596 /* We used pair allocation for datalo, so already should be aligned. */ 1597 tcg_debug_assert((datalo & 1) == 0); 1598 tcg_debug_assert(datahi == datalo + 1); 1599 /* LDRD requires alignment; double-check that. */ 1600 if (memop_alignment_bits(opc) >= MO_64) { 1601 if (h.index < 0) { 1602 tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); 1603 break; 1604 } 1605 /* 1606 * Rm (the second address op) must not overlap Rt or Rt + 1. 1607 * Since datalo is aligned, we can simplify the test via alignment. 1608 * Flip the two address arguments if that works. 1609 */ 1610 if ((h.index & ~1) != datalo) { 1611 tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index); 1612 break; 1613 } 1614 if ((h.base & ~1) != datalo) { 1615 tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base); 1616 break; 1617 } 1618 } 1619 if (h.index < 0) { 1620 base = h.base; 1621 if (datalo == h.base) { 1622 tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base); 1623 base = TCG_REG_TMP; 1624 } 1625 } else if (h.index_scratch) { 1626 tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base); 1627 tcg_out_ld32_12(s, h.cond, datahi, h.index, 4); 1628 break; 1629 } else { 1630 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1631 h.base, h.index, SHIFT_IMM_LSL(0)); 1632 base = TCG_REG_TMP; 1633 } 1634 tcg_out_ld32_12(s, h.cond, datalo, base, 0); 1635 tcg_out_ld32_12(s, h.cond, datahi, base, 4); 1636 break; 1637 default: 1638 g_assert_not_reached(); 1639 } 1640} 1641 1642static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, 1643 TCGReg addr, MemOpIdx oi, TCGType data_type) 1644{ 1645 MemOp opc = get_memop(oi); 1646 TCGLabelQemuLdst *ldst; 1647 HostAddress h; 1648 1649 ldst = prepare_host_addr(s, &h, addr, oi, true); 1650 if (ldst) { 1651 ldst->type = data_type; 1652 ldst->datalo_reg = datalo; 1653 ldst->datahi_reg = datahi; 1654 1655 /* 1656 * This a conditional BL only to load a pointer within this 1657 * opcode into LR for the slow path. We will not be using 1658 * the value for a tail call. 1659 */ 1660 ldst->label_ptr[0] = s->code_ptr; 1661 tcg_out_bl_imm(s, COND_NE, 0); 1662 1663 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1664 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1665 } else { 1666 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1667 } 1668} 1669 1670static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1671 TCGReg datahi, HostAddress h) 1672{ 1673 /* Byte swapping is left to middle-end expansion. */ 1674 tcg_debug_assert((opc & MO_BSWAP) == 0); 1675 1676 switch (opc & MO_SIZE) { 1677 case MO_8: 1678 if (h.index < 0) { 1679 tcg_out_st8_12(s, h.cond, datalo, h.base, 0); 1680 } else { 1681 tcg_out_st8_r(s, h.cond, datalo, h.base, h.index); 1682 } 1683 break; 1684 case MO_16: 1685 if (h.index < 0) { 1686 tcg_out_st16_8(s, h.cond, datalo, h.base, 0); 1687 } else { 1688 tcg_out_st16_r(s, h.cond, datalo, h.base, h.index); 1689 } 1690 break; 1691 case MO_32: 1692 if (h.index < 0) { 1693 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1694 } else { 1695 tcg_out_st32_r(s, h.cond, datalo, h.base, h.index); 1696 } 1697 break; 1698 case MO_64: 1699 /* We used pair allocation for datalo, so already should be aligned. */ 1700 tcg_debug_assert((datalo & 1) == 0); 1701 tcg_debug_assert(datahi == datalo + 1); 1702 /* STRD requires alignment; double-check that. */ 1703 if (memop_alignment_bits(opc) >= MO_64) { 1704 if (h.index < 0) { 1705 tcg_out_strd_8(s, h.cond, datalo, h.base, 0); 1706 } else { 1707 tcg_out_strd_r(s, h.cond, datalo, h.base, h.index); 1708 } 1709 } else if (h.index < 0) { 1710 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1711 tcg_out_st32_12(s, h.cond, datahi, h.base, 4); 1712 } else if (h.index_scratch) { 1713 tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base); 1714 tcg_out_st32_12(s, h.cond, datahi, h.index, 4); 1715 } else { 1716 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1717 h.base, h.index, SHIFT_IMM_LSL(0)); 1718 tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0); 1719 tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4); 1720 } 1721 break; 1722 default: 1723 g_assert_not_reached(); 1724 } 1725} 1726 1727static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, 1728 TCGReg addr, MemOpIdx oi, TCGType data_type) 1729{ 1730 MemOp opc = get_memop(oi); 1731 TCGLabelQemuLdst *ldst; 1732 HostAddress h; 1733 1734 ldst = prepare_host_addr(s, &h, addr, oi, false); 1735 if (ldst) { 1736 ldst->type = data_type; 1737 ldst->datalo_reg = datalo; 1738 ldst->datahi_reg = datahi; 1739 1740 h.cond = COND_EQ; 1741 tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); 1742 1743 /* The conditional call is last, as we're going to return here. */ 1744 ldst->label_ptr[0] = s->code_ptr; 1745 tcg_out_bl_imm(s, COND_NE, 0); 1746 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1747 } else { 1748 tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); 1749 } 1750} 1751 1752static void tcg_out_epilogue(TCGContext *s); 1753 1754static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) 1755{ 1756 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg); 1757 tcg_out_epilogue(s); 1758} 1759 1760static void tcg_out_goto_tb(TCGContext *s, int which) 1761{ 1762 uintptr_t i_addr; 1763 intptr_t i_disp; 1764 1765 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1766 set_jmp_insn_offset(s, which); 1767 tcg_out32(s, INSN_NOP); 1768 1769 /* When branch is out of range, fall through to indirect. */ 1770 i_addr = get_jmp_target_addr(s, which); 1771 i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8; 1772 tcg_debug_assert(i_disp < 0); 1773 if (i_disp >= -0xfff) { 1774 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp); 1775 } else { 1776 /* 1777 * The TB is close, but outside the 12 bits addressable by 1778 * the load. We can extend this to 20 bits with a sub of a 1779 * shifted immediate from pc. 1780 */ 1781 int h = -i_disp; 1782 int l = -(h & 0xfff); 1783 1784 h = encode_imm_nofail(h + l); 1785 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h); 1786 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l); 1787 } 1788 set_jmp_reset_offset(s, which); 1789} 1790 1791void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1792 uintptr_t jmp_rx, uintptr_t jmp_rw) 1793{ 1794 uintptr_t addr = tb->jmp_target_addr[n]; 1795 ptrdiff_t offset = addr - (jmp_rx + 8); 1796 tcg_insn_unit insn; 1797 1798 /* Either directly branch, or fall through to indirect branch. */ 1799 if (offset == sextract64(offset, 0, 26)) { 1800 /* B <addr> */ 1801 insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2); 1802 } else { 1803 insn = INSN_NOP; 1804 } 1805 1806 qatomic_set((uint32_t *)jmp_rw, insn); 1807 flush_idcache_range(jmp_rx, jmp_rw, 4); 1808} 1809 1810 1811static void tgen_add(TCGContext *s, TCGType type, 1812 TCGReg a0, TCGReg a1, TCGReg a2) 1813{ 1814 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0)); 1815} 1816 1817static void tgen_addi(TCGContext *s, TCGType type, 1818 TCGReg a0, TCGReg a1, tcg_target_long a2) 1819{ 1820 tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2); 1821} 1822 1823static const TCGOutOpBinary outop_add = { 1824 .base.static_constraint = C_O1_I2(r, r, rIN), 1825 .out_rrr = tgen_add, 1826 .out_rri = tgen_addi, 1827}; 1828 1829static void tgen_and(TCGContext *s, TCGType type, 1830 TCGReg a0, TCGReg a1, TCGReg a2) 1831{ 1832 tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0)); 1833} 1834 1835static void tgen_andi(TCGContext *s, TCGType type, 1836 TCGReg a0, TCGReg a1, tcg_target_long a2) 1837{ 1838 tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2); 1839} 1840 1841static const TCGOutOpBinary outop_and = { 1842 .base.static_constraint = C_O1_I2(r, r, rIK), 1843 .out_rrr = tgen_and, 1844 .out_rri = tgen_andi, 1845}; 1846 1847static void tgen_andc(TCGContext *s, TCGType type, 1848 TCGReg a0, TCGReg a1, TCGReg a2) 1849{ 1850 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, a0, a1, a2, SHIFT_IMM_LSL(0)); 1851} 1852 1853static const TCGOutOpBinary outop_andc = { 1854 .base.static_constraint = C_O1_I2(r, r, r), 1855 .out_rrr = tgen_andc, 1856}; 1857 1858static void tgen_clz(TCGContext *s, TCGType type, 1859 TCGReg a0, TCGReg a1, TCGReg a2) 1860{ 1861 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); 1862 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); 1863 tcg_out_mov_reg(s, COND_EQ, a0, a2); 1864} 1865 1866static void tgen_clzi(TCGContext *s, TCGType type, 1867 TCGReg a0, TCGReg a1, tcg_target_long a2) 1868{ 1869 if (a2 == 32) { 1870 tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); 1871 } else { 1872 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); 1873 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); 1874 tcg_out_movi32(s, COND_EQ, a0, a2); 1875 } 1876} 1877 1878static const TCGOutOpBinary outop_clz = { 1879 .base.static_constraint = C_O1_I2(r, r, rIK), 1880 .out_rrr = tgen_clz, 1881 .out_rri = tgen_clzi, 1882}; 1883 1884static const TCGOutOpUnary outop_ctpop = { 1885 .base.static_constraint = C_NotImplemented, 1886}; 1887 1888static void tgen_ctz(TCGContext *s, TCGType type, 1889 TCGReg a0, TCGReg a1, TCGReg a2) 1890{ 1891 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0); 1892 tgen_clz(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2); 1893} 1894 1895static void tgen_ctzi(TCGContext *s, TCGType type, 1896 TCGReg a0, TCGReg a1, tcg_target_long a2) 1897{ 1898 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0); 1899 tgen_clzi(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2); 1900} 1901 1902static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags) 1903{ 1904 return use_armv7_instructions ? C_O1_I2(r, r, rIK) : C_NotImplemented; 1905} 1906 1907static const TCGOutOpBinary outop_ctz = { 1908 .base.static_constraint = C_Dynamic, 1909 .base.dynamic_constraint = cset_ctz, 1910 .out_rrr = tgen_ctz, 1911 .out_rri = tgen_ctzi, 1912}; 1913 1914static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags) 1915{ 1916 return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented; 1917} 1918 1919static void tgen_divs(TCGContext *s, TCGType type, 1920 TCGReg a0, TCGReg a1, TCGReg a2) 1921{ 1922 /* sdiv */ 1923 tcg_out32(s, 0x0710f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8)); 1924} 1925 1926static const TCGOutOpBinary outop_divs = { 1927 .base.static_constraint = C_Dynamic, 1928 .base.dynamic_constraint = cset_idiv, 1929 .out_rrr = tgen_divs, 1930}; 1931 1932static const TCGOutOpDivRem outop_divs2 = { 1933 .base.static_constraint = C_NotImplemented, 1934}; 1935 1936static void tgen_divu(TCGContext *s, TCGType type, 1937 TCGReg a0, TCGReg a1, TCGReg a2) 1938{ 1939 /* udiv */ 1940 tcg_out32(s, 0x0730f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8)); 1941} 1942 1943static const TCGOutOpBinary outop_divu = { 1944 .base.static_constraint = C_Dynamic, 1945 .base.dynamic_constraint = cset_idiv, 1946 .out_rrr = tgen_divu, 1947}; 1948 1949static const TCGOutOpDivRem outop_divu2 = { 1950 .base.static_constraint = C_NotImplemented, 1951}; 1952 1953static const TCGOutOpBinary outop_eqv = { 1954 .base.static_constraint = C_NotImplemented, 1955}; 1956 1957static void tgen_mul(TCGContext *s, TCGType type, 1958 TCGReg a0, TCGReg a1, TCGReg a2) 1959{ 1960 /* mul */ 1961 tcg_out32(s, (COND_AL << 28) | 0x90 | (a0 << 16) | (a1 << 8) | a2); 1962} 1963 1964static const TCGOutOpBinary outop_mul = { 1965 .base.static_constraint = C_O1_I2(r, r, r), 1966 .out_rrr = tgen_mul, 1967}; 1968 1969static void tgen_muls2(TCGContext *s, TCGType type, 1970 TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm) 1971{ 1972 /* smull */ 1973 tcg_out32(s, (COND_AL << 28) | 0x00c00090 | 1974 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 1975} 1976 1977static const TCGOutOpMul2 outop_muls2 = { 1978 .base.static_constraint = C_O2_I2(r, r, r, r), 1979 .out_rrrr = tgen_muls2, 1980}; 1981 1982static const TCGOutOpBinary outop_mulsh = { 1983 .base.static_constraint = C_NotImplemented, 1984}; 1985 1986static void tgen_mulu2(TCGContext *s, TCGType type, 1987 TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm) 1988{ 1989 /* umull */ 1990 tcg_out32(s, (COND_AL << 28) | 0x00800090 | 1991 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 1992} 1993 1994static const TCGOutOpMul2 outop_mulu2 = { 1995 .base.static_constraint = C_O2_I2(r, r, r, r), 1996 .out_rrrr = tgen_mulu2, 1997}; 1998 1999static const TCGOutOpBinary outop_muluh = { 2000 .base.static_constraint = C_NotImplemented, 2001}; 2002 2003static const TCGOutOpBinary outop_nand = { 2004 .base.static_constraint = C_NotImplemented, 2005}; 2006 2007static const TCGOutOpBinary outop_nor = { 2008 .base.static_constraint = C_NotImplemented, 2009}; 2010 2011static void tgen_or(TCGContext *s, TCGType type, 2012 TCGReg a0, TCGReg a1, TCGReg a2) 2013{ 2014 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, a1, a2, SHIFT_IMM_LSL(0)); 2015} 2016 2017static void tgen_ori(TCGContext *s, TCGType type, 2018 TCGReg a0, TCGReg a1, tcg_target_long a2) 2019{ 2020 tcg_out_dat_imm(s, COND_AL, ARITH_ORR, a0, a1, encode_imm_nofail(a2)); 2021} 2022 2023static const TCGOutOpBinary outop_or = { 2024 .base.static_constraint = C_O1_I2(r, r, rI), 2025 .out_rrr = tgen_or, 2026 .out_rri = tgen_ori, 2027}; 2028 2029static const TCGOutOpBinary outop_orc = { 2030 .base.static_constraint = C_NotImplemented, 2031}; 2032 2033static const TCGOutOpBinary outop_rems = { 2034 .base.static_constraint = C_NotImplemented, 2035}; 2036 2037static const TCGOutOpBinary outop_remu = { 2038 .base.static_constraint = C_NotImplemented, 2039}; 2040 2041static const TCGOutOpBinary outop_rotl = { 2042 .base.static_constraint = C_NotImplemented, 2043}; 2044 2045static void tgen_rotr(TCGContext *s, TCGType type, 2046 TCGReg a0, TCGReg a1, TCGReg a2) 2047{ 2048 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ROR(a2)); 2049} 2050 2051static void tgen_rotri(TCGContext *s, TCGType type, 2052 TCGReg a0, TCGReg a1, tcg_target_long a2) 2053{ 2054 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_IMM_ROR(a2 & 0x1f)); 2055} 2056 2057static const TCGOutOpBinary outop_rotr = { 2058 .base.static_constraint = C_O1_I2(r, r, ri), 2059 .out_rrr = tgen_rotr, 2060 .out_rri = tgen_rotri, 2061}; 2062 2063static void tgen_sar(TCGContext *s, TCGType type, 2064 TCGReg a0, TCGReg a1, TCGReg a2) 2065{ 2066 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ASR(a2)); 2067} 2068 2069static void tgen_sari(TCGContext *s, TCGType type, 2070 TCGReg a0, TCGReg a1, tcg_target_long a2) 2071{ 2072 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, 2073 SHIFT_IMM_ASR(a2 & 0x1f)); 2074} 2075 2076static const TCGOutOpBinary outop_sar = { 2077 .base.static_constraint = C_O1_I2(r, r, ri), 2078 .out_rrr = tgen_sar, 2079 .out_rri = tgen_sari, 2080}; 2081 2082static void tgen_shl(TCGContext *s, TCGType type, 2083 TCGReg a0, TCGReg a1, TCGReg a2) 2084{ 2085 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSL(a2)); 2086} 2087 2088static void tgen_shli(TCGContext *s, TCGType type, 2089 TCGReg a0, TCGReg a1, tcg_target_long a2) 2090{ 2091 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, 2092 SHIFT_IMM_LSL(a2 & 0x1f)); 2093} 2094 2095static const TCGOutOpBinary outop_shl = { 2096 .base.static_constraint = C_O1_I2(r, r, ri), 2097 .out_rrr = tgen_shl, 2098 .out_rri = tgen_shli, 2099}; 2100 2101static void tgen_shr(TCGContext *s, TCGType type, 2102 TCGReg a0, TCGReg a1, TCGReg a2) 2103{ 2104 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSR(a2)); 2105} 2106 2107static void tgen_shri(TCGContext *s, TCGType type, 2108 TCGReg a0, TCGReg a1, tcg_target_long a2) 2109{ 2110 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, 2111 SHIFT_IMM_LSR(a2 & 0x1f)); 2112} 2113 2114static const TCGOutOpBinary outop_shr = { 2115 .base.static_constraint = C_O1_I2(r, r, ri), 2116 .out_rrr = tgen_shr, 2117 .out_rri = tgen_shri, 2118}; 2119 2120static void tgen_sub(TCGContext *s, TCGType type, 2121 TCGReg a0, TCGReg a1, TCGReg a2) 2122{ 2123 tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0)); 2124} 2125 2126static void tgen_subfi(TCGContext *s, TCGType type, 2127 TCGReg a0, tcg_target_long a1, TCGReg a2) 2128{ 2129 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1)); 2130} 2131 2132static const TCGOutOpSubtract outop_sub = { 2133 .base.static_constraint = C_O1_I2(r, rI, r), 2134 .out_rrr = tgen_sub, 2135 .out_rir = tgen_subfi, 2136}; 2137 2138static void tgen_xor(TCGContext *s, TCGType type, 2139 TCGReg a0, TCGReg a1, TCGReg a2) 2140{ 2141 tcg_out_dat_reg(s, COND_AL, ARITH_EOR, a0, a1, a2, SHIFT_IMM_LSL(0)); 2142} 2143 2144static void tgen_xori(TCGContext *s, TCGType type, 2145 TCGReg a0, TCGReg a1, tcg_target_long a2) 2146{ 2147 tcg_out_dat_imm(s, COND_AL, ARITH_EOR, a0, a1, encode_imm_nofail(a2)); 2148} 2149 2150static const TCGOutOpBinary outop_xor = { 2151 .base.static_constraint = C_O1_I2(r, r, rI), 2152 .out_rrr = tgen_xor, 2153 .out_rri = tgen_xori, 2154}; 2155 2156static void tgen_bswap16(TCGContext *s, TCGType type, 2157 TCGReg rd, TCGReg rn, unsigned flags) 2158{ 2159 if (flags & TCG_BSWAP_OS) { 2160 /* revsh */ 2161 tcg_out32(s, 0x06ff0fb0 | (COND_AL << 28) | (rd << 12) | rn); 2162 return; 2163 } 2164 2165 /* rev16 */ 2166 tcg_out32(s, 0x06bf0fb0 | (COND_AL << 28) | (rd << 12) | rn); 2167 if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 2168 tcg_out_ext16u(s, rd, rd); 2169 } 2170} 2171 2172static const TCGOutOpBswap outop_bswap16 = { 2173 .base.static_constraint = C_O1_I1(r, r), 2174 .out_rr = tgen_bswap16, 2175}; 2176 2177static void tgen_bswap32(TCGContext *s, TCGType type, 2178 TCGReg rd, TCGReg rn, unsigned flags) 2179{ 2180 /* rev */ 2181 tcg_out32(s, 0x06bf0f30 | (COND_AL << 28) | (rd << 12) | rn); 2182} 2183 2184static const TCGOutOpBswap outop_bswap32 = { 2185 .base.static_constraint = C_O1_I1(r, r), 2186 .out_rr = tgen_bswap32, 2187}; 2188 2189static const TCGOutOpUnary outop_bswap64 = { 2190 .base.static_constraint = C_NotImplemented, 2191}; 2192 2193static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) 2194{ 2195 tgen_subfi(s, type, a0, 0, a1); 2196} 2197 2198static const TCGOutOpUnary outop_neg = { 2199 .base.static_constraint = C_O1_I1(r, r), 2200 .out_rr = tgen_neg, 2201}; 2202 2203static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) 2204{ 2205 tcg_out_dat_reg(s, COND_AL, ARITH_MVN, a0, 0, a1, SHIFT_IMM_LSL(0)); 2206} 2207 2208static const TCGOutOpUnary outop_not = { 2209 .base.static_constraint = C_O1_I1(r, r), 2210 .out_rr = tgen_not, 2211}; 2212 2213static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, 2214 TCGReg a0, TCGReg a1, TCGLabel *l) 2215{ 2216 cond = tgen_cmp(s, cond, a0, a1); 2217 tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l); 2218} 2219 2220static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond, 2221 TCGReg a0, tcg_target_long a1, TCGLabel *l) 2222{ 2223 cond = tgen_cmpi(s, cond, a0, a1); 2224 tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l); 2225} 2226 2227static const TCGOutOpBrcond outop_brcond = { 2228 .base.static_constraint = C_O0_I2(r, rIN), 2229 .out_rr = tgen_brcond, 2230 .out_ri = tgen_brcondi, 2231}; 2232 2233static void finish_setcond(TCGContext *s, TCGCond cond, TCGReg ret, bool neg) 2234{ 2235 tcg_out_movi32(s, tcg_cond_to_arm_cond[tcg_invert_cond(cond)], ret, 0); 2236 tcg_out_movi32(s, tcg_cond_to_arm_cond[cond], ret, neg ? -1 : 1); 2237} 2238 2239static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, 2240 TCGReg a0, TCGReg a1, TCGReg a2) 2241{ 2242 cond = tgen_cmp(s, cond, a1, a2); 2243 finish_setcond(s, cond, a0, false); 2244} 2245 2246static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, 2247 TCGReg a0, TCGReg a1, tcg_target_long a2) 2248{ 2249 cond = tgen_cmpi(s, cond, a1, a2); 2250 finish_setcond(s, cond, a0, false); 2251} 2252 2253static const TCGOutOpSetcond outop_setcond = { 2254 .base.static_constraint = C_O1_I2(r, r, rIN), 2255 .out_rrr = tgen_setcond, 2256 .out_rri = tgen_setcondi, 2257}; 2258 2259static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, 2260 TCGReg a0, TCGReg a1, TCGReg a2) 2261{ 2262 cond = tgen_cmp(s, cond, a1, a2); 2263 finish_setcond(s, cond, a0, true); 2264} 2265 2266static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, 2267 TCGReg a0, TCGReg a1, tcg_target_long a2) 2268{ 2269 cond = tgen_cmpi(s, cond, a1, a2); 2270 finish_setcond(s, cond, a0, true); 2271} 2272 2273static const TCGOutOpSetcond outop_negsetcond = { 2274 .base.static_constraint = C_O1_I2(r, r, rIN), 2275 .out_rrr = tgen_negsetcond, 2276 .out_rri = tgen_negsetcondi, 2277}; 2278 2279static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, 2280 TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, 2281 TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf) 2282{ 2283 cond = tcg_out_cmp(s, cond, c1, c2, const_c2); 2284 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[cond], ARITH_MOV, ARITH_MVN, 2285 ret, 0, vt, const_vt); 2286} 2287 2288static const TCGOutOpMovcond outop_movcond = { 2289 .base.static_constraint = C_O1_I4(r, r, rIN, rIK, 0), 2290 .out = tgen_movcond, 2291}; 2292 2293static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, 2294 TCGArg bl, bool const_bl, TCGArg bh, bool const_bh, 2295 TCGLabel *l) 2296{ 2297 cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); 2298 tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l); 2299} 2300 2301static const TCGOutOpBrcond2 outop_brcond2 = { 2302 .base.static_constraint = C_O0_I4(r, r, rI, rI), 2303 .out = tgen_brcond2, 2304}; 2305 2306static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, 2307 TCGReg al, TCGReg ah, 2308 TCGArg bl, bool const_bl, 2309 TCGArg bh, bool const_bh) 2310{ 2311 cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); 2312 finish_setcond(s, cond, ret, false); 2313} 2314 2315static const TCGOutOpSetcond2 outop_setcond2 = { 2316 .base.static_constraint = C_O1_I4(r, r, r, rI, rI), 2317 .out = tgen_setcond2, 2318}; 2319 2320static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type, 2321 const TCGArg args[TCG_MAX_OP_ARGS], 2322 const int const_args[TCG_MAX_OP_ARGS]) 2323{ 2324 TCGArg a0, a1, a2, a3, a4, a5; 2325 2326 switch (opc) { 2327 case INDEX_op_goto_ptr: 2328 tcg_out_b_reg(s, COND_AL, args[0]); 2329 break; 2330 case INDEX_op_br: 2331 tcg_out_goto_label(s, COND_AL, arg_label(args[0])); 2332 break; 2333 2334 case INDEX_op_ld8u_i32: 2335 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); 2336 break; 2337 case INDEX_op_ld8s_i32: 2338 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); 2339 break; 2340 case INDEX_op_ld16u_i32: 2341 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); 2342 break; 2343 case INDEX_op_ld16s_i32: 2344 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); 2345 break; 2346 case INDEX_op_ld_i32: 2347 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); 2348 break; 2349 case INDEX_op_st8_i32: 2350 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); 2351 break; 2352 case INDEX_op_st16_i32: 2353 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); 2354 break; 2355 case INDEX_op_st_i32: 2356 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); 2357 break; 2358 2359 case INDEX_op_add2_i32: 2360 a0 = args[0], a1 = args[1], a2 = args[2]; 2361 a3 = args[3], a4 = args[4], a5 = args[5]; 2362 if (a0 == a3 || (a0 == a5 && !const_args[5])) { 2363 a0 = TCG_REG_TMP; 2364 } 2365 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, 2366 a0, a2, a4, const_args[4]); 2367 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, 2368 a1, a3, a5, const_args[5]); 2369 tcg_out_mov_reg(s, COND_AL, args[0], a0); 2370 break; 2371 case INDEX_op_sub2_i32: 2372 a0 = args[0], a1 = args[1], a2 = args[2]; 2373 a3 = args[3], a4 = args[4], a5 = args[5]; 2374 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { 2375 a0 = TCG_REG_TMP; 2376 } 2377 if (const_args[2]) { 2378 if (const_args[4]) { 2379 tcg_out_movi32(s, COND_AL, a0, a4); 2380 a4 = a0; 2381 } 2382 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); 2383 } else { 2384 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, 2385 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); 2386 } 2387 if (const_args[3]) { 2388 if (const_args[5]) { 2389 tcg_out_movi32(s, COND_AL, a1, a5); 2390 a5 = a1; 2391 } 2392 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); 2393 } else { 2394 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, 2395 a1, a3, a5, const_args[5]); 2396 } 2397 tcg_out_mov_reg(s, COND_AL, args[0], a0); 2398 break; 2399 2400 case INDEX_op_qemu_ld_i32: 2401 tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32); 2402 break; 2403 case INDEX_op_qemu_ld_i64: 2404 tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64); 2405 break; 2406 2407 case INDEX_op_qemu_st_i32: 2408 tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32); 2409 break; 2410 case INDEX_op_qemu_st_i64: 2411 tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64); 2412 break; 2413 2414 case INDEX_op_extract2_i32: 2415 /* ??? These optimization vs zero should be generic. */ 2416 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ 2417 if (const_args[1]) { 2418 if (const_args[2]) { 2419 tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); 2420 } else { 2421 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2422 args[2], SHIFT_IMM_LSL(32 - args[3])); 2423 } 2424 } else if (const_args[2]) { 2425 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2426 args[1], SHIFT_IMM_LSR(args[3])); 2427 } else { 2428 /* We can do extract2 in 2 insns, vs the 3 required otherwise. */ 2429 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, 2430 args[2], SHIFT_IMM_LSL(32 - args[3])); 2431 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, 2432 args[1], SHIFT_IMM_LSR(args[3])); 2433 } 2434 break; 2435 2436 case INDEX_op_mb: 2437 tcg_out_mb(s, args[0]); 2438 break; 2439 2440 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 2441 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 2442 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 2443 default: 2444 g_assert_not_reached(); 2445 } 2446} 2447 2448static TCGConstraintSetIndex 2449tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) 2450{ 2451 switch (op) { 2452 case INDEX_op_goto_ptr: 2453 return C_O0_I1(r); 2454 2455 case INDEX_op_ld8u_i32: 2456 case INDEX_op_ld8s_i32: 2457 case INDEX_op_ld16u_i32: 2458 case INDEX_op_ld16s_i32: 2459 case INDEX_op_ld_i32: 2460 return C_O1_I1(r, r); 2461 2462 case INDEX_op_st8_i32: 2463 case INDEX_op_st16_i32: 2464 case INDEX_op_st_i32: 2465 return C_O0_I2(r, r); 2466 2467 case INDEX_op_extract2_i32: 2468 return C_O1_I2(r, rZ, rZ); 2469 case INDEX_op_add2_i32: 2470 return C_O2_I4(r, r, r, r, rIN, rIK); 2471 case INDEX_op_sub2_i32: 2472 return C_O2_I4(r, r, rI, rI, rIN, rIK); 2473 case INDEX_op_qemu_ld_i32: 2474 return C_O1_I1(r, q); 2475 case INDEX_op_qemu_ld_i64: 2476 return C_O2_I1(e, p, q); 2477 case INDEX_op_qemu_st_i32: 2478 return C_O0_I2(q, q); 2479 case INDEX_op_qemu_st_i64: 2480 return C_O0_I3(Q, p, q); 2481 2482 case INDEX_op_st_vec: 2483 return C_O0_I2(w, r); 2484 case INDEX_op_ld_vec: 2485 case INDEX_op_dupm_vec: 2486 return C_O1_I1(w, r); 2487 case INDEX_op_dup_vec: 2488 return C_O1_I1(w, wr); 2489 case INDEX_op_abs_vec: 2490 case INDEX_op_neg_vec: 2491 case INDEX_op_not_vec: 2492 case INDEX_op_shli_vec: 2493 case INDEX_op_shri_vec: 2494 case INDEX_op_sari_vec: 2495 return C_O1_I1(w, w); 2496 case INDEX_op_dup2_vec: 2497 case INDEX_op_add_vec: 2498 case INDEX_op_mul_vec: 2499 case INDEX_op_smax_vec: 2500 case INDEX_op_smin_vec: 2501 case INDEX_op_ssadd_vec: 2502 case INDEX_op_sssub_vec: 2503 case INDEX_op_sub_vec: 2504 case INDEX_op_umax_vec: 2505 case INDEX_op_umin_vec: 2506 case INDEX_op_usadd_vec: 2507 case INDEX_op_ussub_vec: 2508 case INDEX_op_xor_vec: 2509 case INDEX_op_arm_sshl_vec: 2510 case INDEX_op_arm_ushl_vec: 2511 return C_O1_I2(w, w, w); 2512 case INDEX_op_arm_sli_vec: 2513 return C_O1_I2(w, 0, w); 2514 case INDEX_op_or_vec: 2515 case INDEX_op_andc_vec: 2516 return C_O1_I2(w, w, wO); 2517 case INDEX_op_and_vec: 2518 case INDEX_op_orc_vec: 2519 return C_O1_I2(w, w, wV); 2520 case INDEX_op_cmp_vec: 2521 return C_O1_I2(w, w, wZ); 2522 case INDEX_op_bitsel_vec: 2523 return C_O1_I3(w, w, w, w); 2524 default: 2525 return C_NotImplemented; 2526 } 2527} 2528 2529static void tcg_target_init(TCGContext *s) 2530{ 2531 /* 2532 * Only probe for the platform and capabilities if we haven't already 2533 * determined maximum values at compile time. 2534 */ 2535#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2536 { 2537 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2538#ifndef use_idiv_instructions 2539 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2540#endif 2541#ifndef use_neon_instructions 2542 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2543#endif 2544 } 2545#endif 2546 2547 if (__ARM_ARCH < 7) { 2548 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2549 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2550 arm_arch = pl[1] - '0'; 2551 } 2552 2553 if (arm_arch < 6) { 2554 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2555 exit(EXIT_FAILURE); 2556 } 2557 } 2558 2559 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2560 2561 tcg_target_call_clobber_regs = 0; 2562 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2563 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2564 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2565 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2566 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2567 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2568 2569 if (use_neon_instructions) { 2570 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2571 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2572 2573 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2574 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2575 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2576 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2577 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2578 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2579 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2580 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2581 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2582 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2583 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2584 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2585 } 2586 2587 s->reserved_regs = 0; 2588 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2589 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2590 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2591 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2592} 2593 2594static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2595 TCGReg arg1, intptr_t arg2) 2596{ 2597 switch (type) { 2598 case TCG_TYPE_I32: 2599 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2600 return; 2601 case TCG_TYPE_V64: 2602 /* regs 1; size 8; align 8 */ 2603 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2604 return; 2605 case TCG_TYPE_V128: 2606 /* 2607 * We have only 8-byte alignment for the stack per the ABI. 2608 * Rather than dynamically re-align the stack, it's easier 2609 * to simply not request alignment beyond that. So: 2610 * regs 2; size 8; align 8 2611 */ 2612 tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2); 2613 return; 2614 default: 2615 g_assert_not_reached(); 2616 } 2617} 2618 2619static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 2620 TCGReg arg1, intptr_t arg2) 2621{ 2622 switch (type) { 2623 case TCG_TYPE_I32: 2624 tcg_out_st32(s, COND_AL, arg, arg1, arg2); 2625 return; 2626 case TCG_TYPE_V64: 2627 /* regs 1; size 8; align 8 */ 2628 tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); 2629 return; 2630 case TCG_TYPE_V128: 2631 /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */ 2632 tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2); 2633 return; 2634 default: 2635 g_assert_not_reached(); 2636 } 2637} 2638 2639static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 2640 TCGReg base, intptr_t ofs) 2641{ 2642 return false; 2643} 2644 2645static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 2646{ 2647 if (ret == arg) { 2648 return true; 2649 } 2650 switch (type) { 2651 case TCG_TYPE_I32: 2652 if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) { 2653 tcg_out_mov_reg(s, COND_AL, ret, arg); 2654 return true; 2655 } 2656 return false; 2657 2658 case TCG_TYPE_V64: 2659 case TCG_TYPE_V128: 2660 /* "VMOV D,N" is an alias for "VORR D,N,N". */ 2661 tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg); 2662 return true; 2663 2664 default: 2665 g_assert_not_reached(); 2666 } 2667} 2668 2669static void tcg_out_movi(TCGContext *s, TCGType type, 2670 TCGReg ret, tcg_target_long arg) 2671{ 2672 tcg_debug_assert(type == TCG_TYPE_I32); 2673 tcg_debug_assert(ret < TCG_REG_Q0); 2674 tcg_out_movi32(s, COND_AL, ret, arg); 2675} 2676 2677static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 2678{ 2679 return false; 2680} 2681 2682static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 2683 tcg_target_long imm) 2684{ 2685 int enc, opc = ARITH_ADD; 2686 2687 /* All of the easiest immediates to encode are positive. */ 2688 if (imm < 0) { 2689 imm = -imm; 2690 opc = ARITH_SUB; 2691 } 2692 enc = encode_imm(imm); 2693 if (enc >= 0) { 2694 tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc); 2695 } else { 2696 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm); 2697 tcg_out_dat_reg(s, COND_AL, opc, rd, rs, 2698 TCG_REG_TMP, SHIFT_IMM_LSL(0)); 2699 } 2700} 2701 2702/* Type is always V128, with I64 elements. */ 2703static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh) 2704{ 2705 /* Move high element into place first. */ 2706 /* VMOV Dd+1, Ds */ 2707 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh); 2708 /* Move low element into place; tcg_out_mov will check for nop. */ 2709 tcg_out_mov(s, TCG_TYPE_V64, rd, rl); 2710} 2711 2712static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 2713 TCGReg rd, TCGReg rs) 2714{ 2715 int q = type - TCG_TYPE_V64; 2716 2717 if (vece == MO_64) { 2718 if (type == TCG_TYPE_V128) { 2719 tcg_out_dup2_vec(s, rd, rs, rs); 2720 } else { 2721 tcg_out_mov(s, TCG_TYPE_V64, rd, rs); 2722 } 2723 } else if (rs < TCG_REG_Q0) { 2724 int b = (vece == MO_8); 2725 int e = (vece == MO_16); 2726 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) | 2727 encode_vn(rd) | (rs << 12)); 2728 } else { 2729 int imm4 = 1 << vece; 2730 tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) | 2731 encode_vd(rd) | encode_vm(rs)); 2732 } 2733 return true; 2734} 2735 2736static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 2737 TCGReg rd, TCGReg base, intptr_t offset) 2738{ 2739 if (vece == MO_64) { 2740 tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset); 2741 if (type == TCG_TYPE_V128) { 2742 tcg_out_dup2_vec(s, rd, rd, rd); 2743 } 2744 } else { 2745 int q = type - TCG_TYPE_V64; 2746 tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5), 2747 rd, base, offset); 2748 } 2749 return true; 2750} 2751 2752static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 2753 TCGReg rd, int64_t v64) 2754{ 2755 int q = type - TCG_TYPE_V64; 2756 int cmode, imm8, i; 2757 2758 /* Test all bytes equal first. */ 2759 if (vece == MO_8) { 2760 tcg_out_vmovi(s, rd, q, 0, 0xe, v64); 2761 return; 2762 } 2763 2764 /* 2765 * Test all bytes 0x00 or 0xff second. This can match cases that 2766 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. 2767 */ 2768 for (i = imm8 = 0; i < 8; i++) { 2769 uint8_t byte = v64 >> (i * 8); 2770 if (byte == 0xff) { 2771 imm8 |= 1 << i; 2772 } else if (byte != 0) { 2773 goto fail_bytes; 2774 } 2775 } 2776 tcg_out_vmovi(s, rd, q, 1, 0xe, imm8); 2777 return; 2778 fail_bytes: 2779 2780 /* 2781 * Tests for various replications. For each element width, if we 2782 * cannot find an expansion there's no point checking a larger 2783 * width because we already know by replication it cannot match. 2784 */ 2785 if (vece == MO_16) { 2786 uint16_t v16 = v64; 2787 2788 if (is_shimm16(v16, &cmode, &imm8)) { 2789 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2790 return; 2791 } 2792 if (is_shimm16(~v16, &cmode, &imm8)) { 2793 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2794 return; 2795 } 2796 2797 /* 2798 * Otherwise, all remaining constants can be loaded in two insns: 2799 * rd = v16 & 0xff, rd |= v16 & 0xff00. 2800 */ 2801 tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff); 2802 tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */ 2803 return; 2804 } 2805 2806 if (vece == MO_32) { 2807 uint32_t v32 = v64; 2808 2809 if (is_shimm32(v32, &cmode, &imm8) || 2810 is_soimm32(v32, &cmode, &imm8)) { 2811 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2812 return; 2813 } 2814 if (is_shimm32(~v32, &cmode, &imm8) || 2815 is_soimm32(~v32, &cmode, &imm8)) { 2816 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2817 return; 2818 } 2819 2820 /* 2821 * Restrict the set of constants to those we can load with 2822 * two instructions. Others we load from the pool. 2823 */ 2824 i = is_shimm32_pair(v32, &cmode, &imm8); 2825 if (i) { 2826 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2827 tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8)); 2828 return; 2829 } 2830 i = is_shimm32_pair(~v32, &cmode, &imm8); 2831 if (i) { 2832 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2833 tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8)); 2834 return; 2835 } 2836 } 2837 2838 /* 2839 * As a last resort, load from the constant pool. 2840 */ 2841 if (!q || vece == MO_64) { 2842 new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32); 2843 /* VLDR Dd, [pc + offset] */ 2844 tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16)); 2845 if (q) { 2846 tcg_out_dup2_vec(s, rd, rd, rd); 2847 } 2848 } else { 2849 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0); 2850 /* add tmp, pc, offset */ 2851 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0); 2852 tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0); 2853 } 2854} 2855 2856static const ARMInsn vec_cmp_insn[16] = { 2857 [TCG_COND_EQ] = INSN_VCEQ, 2858 [TCG_COND_GT] = INSN_VCGT, 2859 [TCG_COND_GE] = INSN_VCGE, 2860 [TCG_COND_GTU] = INSN_VCGT_U, 2861 [TCG_COND_GEU] = INSN_VCGE_U, 2862}; 2863 2864static const ARMInsn vec_cmp0_insn[16] = { 2865 [TCG_COND_EQ] = INSN_VCEQ0, 2866 [TCG_COND_GT] = INSN_VCGT0, 2867 [TCG_COND_GE] = INSN_VCGE0, 2868 [TCG_COND_LT] = INSN_VCLT0, 2869 [TCG_COND_LE] = INSN_VCLE0, 2870}; 2871 2872static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 2873 unsigned vecl, unsigned vece, 2874 const TCGArg args[TCG_MAX_OP_ARGS], 2875 const int const_args[TCG_MAX_OP_ARGS]) 2876{ 2877 TCGType type = vecl + TCG_TYPE_V64; 2878 unsigned q = vecl; 2879 TCGArg a0, a1, a2, a3; 2880 int cmode, imm8; 2881 2882 a0 = args[0]; 2883 a1 = args[1]; 2884 a2 = args[2]; 2885 2886 switch (opc) { 2887 case INDEX_op_ld_vec: 2888 tcg_out_ld(s, type, a0, a1, a2); 2889 return; 2890 case INDEX_op_st_vec: 2891 tcg_out_st(s, type, a0, a1, a2); 2892 return; 2893 case INDEX_op_dupm_vec: 2894 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2895 return; 2896 case INDEX_op_dup2_vec: 2897 tcg_out_dup2_vec(s, a0, a1, a2); 2898 return; 2899 case INDEX_op_abs_vec: 2900 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1); 2901 return; 2902 case INDEX_op_neg_vec: 2903 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1); 2904 return; 2905 case INDEX_op_not_vec: 2906 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1); 2907 return; 2908 case INDEX_op_add_vec: 2909 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); 2910 return; 2911 case INDEX_op_mul_vec: 2912 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); 2913 return; 2914 case INDEX_op_smax_vec: 2915 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2); 2916 return; 2917 case INDEX_op_smin_vec: 2918 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2); 2919 return; 2920 case INDEX_op_sub_vec: 2921 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); 2922 return; 2923 case INDEX_op_ssadd_vec: 2924 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2); 2925 return; 2926 case INDEX_op_sssub_vec: 2927 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); 2928 return; 2929 case INDEX_op_umax_vec: 2930 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2); 2931 return; 2932 case INDEX_op_umin_vec: 2933 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2); 2934 return; 2935 case INDEX_op_usadd_vec: 2936 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); 2937 return; 2938 case INDEX_op_ussub_vec: 2939 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2); 2940 return; 2941 case INDEX_op_xor_vec: 2942 tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); 2943 return; 2944 case INDEX_op_arm_sshl_vec: 2945 /* 2946 * Note that Vm is the data and Vn is the shift count, 2947 * therefore the arguments appear reversed. 2948 */ 2949 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1); 2950 return; 2951 case INDEX_op_arm_ushl_vec: 2952 /* See above. */ 2953 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1); 2954 return; 2955 case INDEX_op_shli_vec: 2956 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); 2957 return; 2958 case INDEX_op_shri_vec: 2959 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2); 2960 return; 2961 case INDEX_op_sari_vec: 2962 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); 2963 return; 2964 case INDEX_op_arm_sli_vec: 2965 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece)); 2966 return; 2967 2968 case INDEX_op_andc_vec: 2969 if (!const_args[2]) { 2970 tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2); 2971 return; 2972 } 2973 a2 = ~a2; 2974 /* fall through */ 2975 case INDEX_op_and_vec: 2976 if (const_args[2]) { 2977 is_shimm1632(~a2, &cmode, &imm8); 2978 if (a0 == a1) { 2979 tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */ 2980 return; 2981 } 2982 tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */ 2983 a2 = a0; 2984 } 2985 tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); 2986 return; 2987 2988 case INDEX_op_orc_vec: 2989 if (!const_args[2]) { 2990 tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2); 2991 return; 2992 } 2993 a2 = ~a2; 2994 /* fall through */ 2995 case INDEX_op_or_vec: 2996 if (const_args[2]) { 2997 is_shimm1632(a2, &cmode, &imm8); 2998 if (a0 == a1) { 2999 tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */ 3000 return; 3001 } 3002 tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */ 3003 a2 = a0; 3004 } 3005 tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2); 3006 return; 3007 3008 case INDEX_op_cmp_vec: 3009 { 3010 TCGCond cond = args[3]; 3011 ARMInsn insn; 3012 3013 switch (cond) { 3014 case TCG_COND_NE: 3015 if (const_args[2]) { 3016 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1); 3017 } else { 3018 tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2); 3019 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 3020 } 3021 break; 3022 3023 case TCG_COND_TSTNE: 3024 case TCG_COND_TSTEQ: 3025 if (const_args[2]) { 3026 /* (x & 0) == 0 */ 3027 tcg_out_dupi_vec(s, type, MO_8, a0, 3028 -(cond == TCG_COND_TSTEQ)); 3029 break; 3030 } 3031 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a2); 3032 if (cond == TCG_COND_TSTEQ) { 3033 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 3034 } 3035 break; 3036 3037 default: 3038 if (const_args[2]) { 3039 insn = vec_cmp0_insn[cond]; 3040 if (insn) { 3041 tcg_out_vreg2(s, insn, q, vece, a0, a1); 3042 return; 3043 } 3044 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); 3045 a2 = TCG_VEC_TMP; 3046 } 3047 insn = vec_cmp_insn[cond]; 3048 if (insn == 0) { 3049 TCGArg t; 3050 t = a1, a1 = a2, a2 = t; 3051 cond = tcg_swap_cond(cond); 3052 insn = vec_cmp_insn[cond]; 3053 tcg_debug_assert(insn != 0); 3054 } 3055 tcg_out_vreg3(s, insn, q, vece, a0, a1, a2); 3056 break; 3057 } 3058 } 3059 return; 3060 3061 case INDEX_op_bitsel_vec: 3062 a3 = args[3]; 3063 if (a0 == a3) { 3064 tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1); 3065 } else if (a0 == a2) { 3066 tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1); 3067 } else { 3068 tcg_out_mov(s, type, a0, a1); 3069 tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3); 3070 } 3071 return; 3072 3073 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ 3074 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ 3075 default: 3076 g_assert_not_reached(); 3077 } 3078} 3079 3080int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 3081{ 3082 switch (opc) { 3083 case INDEX_op_add_vec: 3084 case INDEX_op_sub_vec: 3085 case INDEX_op_and_vec: 3086 case INDEX_op_andc_vec: 3087 case INDEX_op_or_vec: 3088 case INDEX_op_orc_vec: 3089 case INDEX_op_xor_vec: 3090 case INDEX_op_not_vec: 3091 case INDEX_op_shli_vec: 3092 case INDEX_op_shri_vec: 3093 case INDEX_op_sari_vec: 3094 case INDEX_op_ssadd_vec: 3095 case INDEX_op_sssub_vec: 3096 case INDEX_op_usadd_vec: 3097 case INDEX_op_ussub_vec: 3098 case INDEX_op_bitsel_vec: 3099 return 1; 3100 case INDEX_op_abs_vec: 3101 case INDEX_op_cmp_vec: 3102 case INDEX_op_mul_vec: 3103 case INDEX_op_neg_vec: 3104 case INDEX_op_smax_vec: 3105 case INDEX_op_smin_vec: 3106 case INDEX_op_umax_vec: 3107 case INDEX_op_umin_vec: 3108 return vece < MO_64; 3109 case INDEX_op_shlv_vec: 3110 case INDEX_op_shrv_vec: 3111 case INDEX_op_sarv_vec: 3112 case INDEX_op_rotli_vec: 3113 case INDEX_op_rotlv_vec: 3114 case INDEX_op_rotrv_vec: 3115 return -1; 3116 default: 3117 return 0; 3118 } 3119} 3120 3121void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 3122 TCGArg a0, ...) 3123{ 3124 va_list va; 3125 TCGv_vec v0, v1, v2, t1, t2, c1; 3126 TCGArg a2; 3127 3128 va_start(va, a0); 3129 v0 = temp_tcgv_vec(arg_temp(a0)); 3130 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 3131 a2 = va_arg(va, TCGArg); 3132 va_end(va); 3133 3134 switch (opc) { 3135 case INDEX_op_shlv_vec: 3136 /* 3137 * Merely propagate shlv_vec to arm_ushl_vec. 3138 * In this way we don't set TCG_TARGET_HAS_shv_vec 3139 * because everything is done via expansion. 3140 */ 3141 v2 = temp_tcgv_vec(arg_temp(a2)); 3142 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 3143 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 3144 break; 3145 3146 case INDEX_op_shrv_vec: 3147 case INDEX_op_sarv_vec: 3148 /* Right shifts are negative left shifts for NEON. */ 3149 v2 = temp_tcgv_vec(arg_temp(a2)); 3150 t1 = tcg_temp_new_vec(type); 3151 tcg_gen_neg_vec(vece, t1, v2); 3152 if (opc == INDEX_op_shrv_vec) { 3153 opc = INDEX_op_arm_ushl_vec; 3154 } else { 3155 opc = INDEX_op_arm_sshl_vec; 3156 } 3157 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0), 3158 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3159 tcg_temp_free_vec(t1); 3160 break; 3161 3162 case INDEX_op_rotli_vec: 3163 t1 = tcg_temp_new_vec(type); 3164 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); 3165 vec_gen_4(INDEX_op_arm_sli_vec, type, vece, 3166 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); 3167 tcg_temp_free_vec(t1); 3168 break; 3169 3170 case INDEX_op_rotlv_vec: 3171 v2 = temp_tcgv_vec(arg_temp(a2)); 3172 t1 = tcg_temp_new_vec(type); 3173 c1 = tcg_constant_vec(type, vece, 8 << vece); 3174 tcg_gen_sub_vec(vece, t1, v2, c1); 3175 /* Right shifts are negative left shifts for NEON. */ 3176 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 3177 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3178 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 3179 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 3180 tcg_gen_or_vec(vece, v0, v0, t1); 3181 tcg_temp_free_vec(t1); 3182 break; 3183 3184 case INDEX_op_rotrv_vec: 3185 v2 = temp_tcgv_vec(arg_temp(a2)); 3186 t1 = tcg_temp_new_vec(type); 3187 t2 = tcg_temp_new_vec(type); 3188 c1 = tcg_constant_vec(type, vece, 8 << vece); 3189 tcg_gen_neg_vec(vece, t1, v2); 3190 tcg_gen_sub_vec(vece, t2, c1, v2); 3191 /* Right shifts are negative left shifts for NEON. */ 3192 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 3193 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3194 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2), 3195 tcgv_vec_arg(v1), tcgv_vec_arg(t2)); 3196 tcg_gen_or_vec(vece, v0, t1, t2); 3197 tcg_temp_free_vec(t1); 3198 tcg_temp_free_vec(t2); 3199 break; 3200 3201 default: 3202 g_assert_not_reached(); 3203 } 3204} 3205 3206static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 3207{ 3208 int i; 3209 for (i = 0; i < count; ++i) { 3210 p[i] = INSN_NOP; 3211 } 3212} 3213 3214/* Compute frame size via macros, to share between tcg_target_qemu_prologue 3215 and tcg_register_jit. */ 3216 3217#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long)) 3218 3219#define FRAME_SIZE \ 3220 ((PUSH_SIZE \ 3221 + TCG_STATIC_CALL_ARGS_SIZE \ 3222 + CPU_TEMP_BUF_NLONGS * sizeof(long) \ 3223 + TCG_TARGET_STACK_ALIGN - 1) \ 3224 & -TCG_TARGET_STACK_ALIGN) 3225 3226#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) 3227 3228static void tcg_target_qemu_prologue(TCGContext *s) 3229{ 3230 /* Calling convention requires us to save r4-r11 and lr. */ 3231 /* stmdb sp!, { r4 - r11, lr } */ 3232 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK, 3233 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3234 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3235 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14)); 3236 3237 /* Reserve callee argument and tcg temp space. */ 3238 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, 3239 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3240 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 3241 CPU_TEMP_BUF_NLONGS * sizeof(long)); 3242 3243 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 3244 3245 if (!tcg_use_softmmu && guest_base) { 3246 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); 3247 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); 3248 } 3249 3250 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); 3251 3252 /* 3253 * Return path for goto_ptr. Set return value to 0, a-la exit_tb, 3254 * and fall through to the rest of the epilogue. 3255 */ 3256 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 3257 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); 3258 tcg_out_epilogue(s); 3259} 3260 3261static void tcg_out_epilogue(TCGContext *s) 3262{ 3263 /* Release local stack frame. */ 3264 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, 3265 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3266 3267 /* ldmia sp!, { r4 - r11, pc } */ 3268 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK, 3269 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3270 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3271 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC)); 3272} 3273 3274static void tcg_out_tb_start(TCGContext *s) 3275{ 3276 /* nothing to do */ 3277} 3278 3279typedef struct { 3280 DebugFrameHeader h; 3281 uint8_t fde_def_cfa[4]; 3282 uint8_t fde_reg_ofs[18]; 3283} DebugFrame; 3284 3285#define ELF_HOST_MACHINE EM_ARM 3286 3287/* We're expecting a 2 byte uleb128 encoded value. */ 3288QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); 3289 3290static const DebugFrame debug_frame = { 3291 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 3292 .h.cie.id = -1, 3293 .h.cie.version = 1, 3294 .h.cie.code_align = 1, 3295 .h.cie.data_align = 0x7c, /* sleb128 -4 */ 3296 .h.cie.return_column = 14, 3297 3298 /* Total FDE size does not include the "len" member. */ 3299 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 3300 3301 .fde_def_cfa = { 3302 12, 13, /* DW_CFA_def_cfa sp, ... */ 3303 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 3304 (FRAME_SIZE >> 7) 3305 }, 3306 .fde_reg_ofs = { 3307 /* The following must match the stmdb in the prologue. */ 3308 0x8e, 1, /* DW_CFA_offset, lr, -4 */ 3309 0x8b, 2, /* DW_CFA_offset, r11, -8 */ 3310 0x8a, 3, /* DW_CFA_offset, r10, -12 */ 3311 0x89, 4, /* DW_CFA_offset, r9, -16 */ 3312 0x88, 5, /* DW_CFA_offset, r8, -20 */ 3313 0x87, 6, /* DW_CFA_offset, r7, -24 */ 3314 0x86, 7, /* DW_CFA_offset, r6, -28 */ 3315 0x85, 8, /* DW_CFA_offset, r5, -32 */ 3316 0x84, 9, /* DW_CFA_offset, r4, -36 */ 3317 } 3318}; 3319 3320void tcg_register_jit(const void *buf, size_t buf_size) 3321{ 3322 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 3323} 3324