1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Andrzej Zaborowski 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25#include "elf.h" 26#include "../tcg-ldst.c.inc" 27#include "../tcg-pool.c.inc" 28 29int arm_arch = __ARM_ARCH; 30 31#ifndef use_idiv_instructions 32bool use_idiv_instructions; 33#endif 34#ifndef use_neon_instructions 35bool use_neon_instructions; 36#endif 37 38#ifdef CONFIG_DEBUG_TCG 39static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 40 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 41 "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc", 42 "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7", 43 "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15", 44}; 45#endif 46 47static const int tcg_target_reg_alloc_order[] = { 48 TCG_REG_R4, 49 TCG_REG_R5, 50 TCG_REG_R6, 51 TCG_REG_R7, 52 TCG_REG_R8, 53 TCG_REG_R9, 54 TCG_REG_R10, 55 TCG_REG_R11, 56 TCG_REG_R13, 57 TCG_REG_R0, 58 TCG_REG_R1, 59 TCG_REG_R2, 60 TCG_REG_R3, 61 TCG_REG_R12, 62 TCG_REG_R14, 63 64 TCG_REG_Q0, 65 TCG_REG_Q1, 66 TCG_REG_Q2, 67 TCG_REG_Q3, 68 /* Q4 - Q7 are call-saved, and skipped. */ 69 TCG_REG_Q8, 70 TCG_REG_Q9, 71 TCG_REG_Q10, 72 TCG_REG_Q11, 73 TCG_REG_Q12, 74 TCG_REG_Q13, 75 TCG_REG_Q14, 76 TCG_REG_Q15, 77}; 78 79static const int tcg_target_call_iarg_regs[4] = { 80 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 81}; 82static const int tcg_target_call_oarg_regs[2] = { 83 TCG_REG_R0, TCG_REG_R1 84}; 85 86#define TCG_REG_TMP TCG_REG_R12 87#define TCG_VEC_TMP TCG_REG_Q15 88#ifndef CONFIG_SOFTMMU 89#define TCG_REG_GUEST_BASE TCG_REG_R11 90#endif 91 92typedef enum { 93 COND_EQ = 0x0, 94 COND_NE = 0x1, 95 COND_CS = 0x2, /* Unsigned greater or equal */ 96 COND_CC = 0x3, /* Unsigned less than */ 97 COND_MI = 0x4, /* Negative */ 98 COND_PL = 0x5, /* Zero or greater */ 99 COND_VS = 0x6, /* Overflow */ 100 COND_VC = 0x7, /* No overflow */ 101 COND_HI = 0x8, /* Unsigned greater than */ 102 COND_LS = 0x9, /* Unsigned less or equal */ 103 COND_GE = 0xa, 104 COND_LT = 0xb, 105 COND_GT = 0xc, 106 COND_LE = 0xd, 107 COND_AL = 0xe, 108} ARMCond; 109 110#define TO_CPSR (1 << 20) 111 112#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) 113#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) 114#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) 115#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) 116#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) 117#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) 118#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) 119#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) 120 121typedef enum { 122 ARITH_AND = 0x0 << 21, 123 ARITH_EOR = 0x1 << 21, 124 ARITH_SUB = 0x2 << 21, 125 ARITH_RSB = 0x3 << 21, 126 ARITH_ADD = 0x4 << 21, 127 ARITH_ADC = 0x5 << 21, 128 ARITH_SBC = 0x6 << 21, 129 ARITH_RSC = 0x7 << 21, 130 ARITH_TST = 0x8 << 21 | TO_CPSR, 131 ARITH_CMP = 0xa << 21 | TO_CPSR, 132 ARITH_CMN = 0xb << 21 | TO_CPSR, 133 ARITH_ORR = 0xc << 21, 134 ARITH_MOV = 0xd << 21, 135 ARITH_BIC = 0xe << 21, 136 ARITH_MVN = 0xf << 21, 137 138 INSN_B = 0x0a000000, 139 140 INSN_CLZ = 0x016f0f10, 141 INSN_RBIT = 0x06ff0f30, 142 143 INSN_LDMIA = 0x08b00000, 144 INSN_STMDB = 0x09200000, 145 146 INSN_LDR_IMM = 0x04100000, 147 INSN_LDR_REG = 0x06100000, 148 INSN_STR_IMM = 0x04000000, 149 INSN_STR_REG = 0x06000000, 150 151 INSN_LDRH_IMM = 0x005000b0, 152 INSN_LDRH_REG = 0x001000b0, 153 INSN_LDRSH_IMM = 0x005000f0, 154 INSN_LDRSH_REG = 0x001000f0, 155 INSN_STRH_IMM = 0x004000b0, 156 INSN_STRH_REG = 0x000000b0, 157 158 INSN_LDRB_IMM = 0x04500000, 159 INSN_LDRB_REG = 0x06500000, 160 INSN_LDRSB_IMM = 0x005000d0, 161 INSN_LDRSB_REG = 0x001000d0, 162 INSN_STRB_IMM = 0x04400000, 163 INSN_STRB_REG = 0x06400000, 164 165 INSN_LDRD_IMM = 0x004000d0, 166 INSN_LDRD_REG = 0x000000d0, 167 INSN_STRD_IMM = 0x004000f0, 168 INSN_STRD_REG = 0x000000f0, 169 170 INSN_DMB_ISH = 0xf57ff05b, 171 INSN_DMB_MCR = 0xee070fba, 172 173 /* Architected nop introduced in v6k. */ 174 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this 175 also Just So Happened to do nothing on pre-v6k so that we 176 don't need to conditionalize it? */ 177 INSN_NOP_v6k = 0xe320f000, 178 /* Otherwise the assembler uses mov r0,r0 */ 179 INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, 180 181 INSN_VADD = 0xf2000800, 182 INSN_VAND = 0xf2000110, 183 INSN_VBIC = 0xf2100110, 184 INSN_VEOR = 0xf3000110, 185 INSN_VORN = 0xf2300110, 186 INSN_VORR = 0xf2200110, 187 INSN_VSUB = 0xf3000800, 188 INSN_VMUL = 0xf2000910, 189 INSN_VQADD = 0xf2000010, 190 INSN_VQADD_U = 0xf3000010, 191 INSN_VQSUB = 0xf2000210, 192 INSN_VQSUB_U = 0xf3000210, 193 INSN_VMAX = 0xf2000600, 194 INSN_VMAX_U = 0xf3000600, 195 INSN_VMIN = 0xf2000610, 196 INSN_VMIN_U = 0xf3000610, 197 198 INSN_VABS = 0xf3b10300, 199 INSN_VMVN = 0xf3b00580, 200 INSN_VNEG = 0xf3b10380, 201 202 INSN_VCEQ0 = 0xf3b10100, 203 INSN_VCGT0 = 0xf3b10000, 204 INSN_VCGE0 = 0xf3b10080, 205 INSN_VCLE0 = 0xf3b10180, 206 INSN_VCLT0 = 0xf3b10200, 207 208 INSN_VCEQ = 0xf3000810, 209 INSN_VCGE = 0xf2000310, 210 INSN_VCGT = 0xf2000300, 211 INSN_VCGE_U = 0xf3000310, 212 INSN_VCGT_U = 0xf3000300, 213 214 INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ 215 INSN_VSARI = 0xf2800010, /* VSHR.S */ 216 INSN_VSHRI = 0xf3800010, /* VSHR.U */ 217 INSN_VSLI = 0xf3800510, 218 INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ 219 INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ 220 221 INSN_VBSL = 0xf3100110, 222 INSN_VBIT = 0xf3200110, 223 INSN_VBIF = 0xf3300110, 224 225 INSN_VTST = 0xf2000810, 226 227 INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ 228 INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ 229 INSN_VLDR_D = 0xed100b00, /* VLDR.64 */ 230 INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ 231 INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */ 232 INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ 233 INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */ 234} ARMInsn; 235 236#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) 237 238static const uint8_t tcg_cond_to_arm_cond[] = { 239 [TCG_COND_EQ] = COND_EQ, 240 [TCG_COND_NE] = COND_NE, 241 [TCG_COND_LT] = COND_LT, 242 [TCG_COND_GE] = COND_GE, 243 [TCG_COND_LE] = COND_LE, 244 [TCG_COND_GT] = COND_GT, 245 /* unsigned */ 246 [TCG_COND_LTU] = COND_CC, 247 [TCG_COND_GEU] = COND_CS, 248 [TCG_COND_LEU] = COND_LS, 249 [TCG_COND_GTU] = COND_HI, 250}; 251 252static int encode_imm(uint32_t imm); 253 254/* TCG private relocation type: add with pc+imm8 */ 255#define R_ARM_PC8 11 256 257/* TCG private relocation type: vldr with imm8 << 2 */ 258#define R_ARM_PC11 12 259 260static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 261{ 262 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 263 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2; 264 265 if (offset == sextract32(offset, 0, 24)) { 266 *src_rw = deposit32(*src_rw, 0, 24, offset); 267 return true; 268 } 269 return false; 270} 271 272static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 273{ 274 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 275 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 276 277 if (offset >= -0xfff && offset <= 0xfff) { 278 tcg_insn_unit insn = *src_rw; 279 bool u = (offset >= 0); 280 if (!u) { 281 offset = -offset; 282 } 283 insn = deposit32(insn, 23, 1, u); 284 insn = deposit32(insn, 0, 12, offset); 285 *src_rw = insn; 286 return true; 287 } 288 return false; 289} 290 291static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 292{ 293 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 294 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4; 295 296 if (offset >= -0xff && offset <= 0xff) { 297 tcg_insn_unit insn = *src_rw; 298 bool u = (offset >= 0); 299 if (!u) { 300 offset = -offset; 301 } 302 insn = deposit32(insn, 23, 1, u); 303 insn = deposit32(insn, 0, 8, offset); 304 *src_rw = insn; 305 return true; 306 } 307 return false; 308} 309 310static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 311{ 312 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 313 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 314 int imm12 = encode_imm(offset); 315 316 if (imm12 >= 0) { 317 *src_rw = deposit32(*src_rw, 0, 12, imm12); 318 return true; 319 } 320 return false; 321} 322 323static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 324 intptr_t value, intptr_t addend) 325{ 326 tcg_debug_assert(addend == 0); 327 switch (type) { 328 case R_ARM_PC24: 329 return reloc_pc24(code_ptr, (const tcg_insn_unit *)value); 330 case R_ARM_PC13: 331 return reloc_pc13(code_ptr, (const tcg_insn_unit *)value); 332 case R_ARM_PC11: 333 return reloc_pc11(code_ptr, (const tcg_insn_unit *)value); 334 case R_ARM_PC8: 335 return reloc_pc8(code_ptr, (const tcg_insn_unit *)value); 336 default: 337 g_assert_not_reached(); 338 } 339} 340 341#define TCG_CT_CONST_ARM 0x100 342#define TCG_CT_CONST_INV 0x200 343#define TCG_CT_CONST_NEG 0x400 344#define TCG_CT_CONST_ZERO 0x800 345#define TCG_CT_CONST_ORRI 0x1000 346#define TCG_CT_CONST_ANDI 0x2000 347 348#define ALL_GENERAL_REGS 0xffffu 349#define ALL_VECTOR_REGS 0xffff0000u 350 351/* 352 * r0-r2 will be overwritten when reading the tlb entry (softmmu only) 353 * and r0-r1 doing the byte swapping, so don't use these. 354 * r3 is removed for softmmu to avoid clashes with helper arguments. 355 */ 356#ifdef CONFIG_SOFTMMU 357#define ALL_QLOAD_REGS \ 358 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ 359 (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \ 360 (1 << TCG_REG_R14))) 361#define ALL_QSTORE_REGS \ 362 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ 363 (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \ 364 ((TARGET_LONG_BITS == 64) << TCG_REG_R3))) 365#else 366#define ALL_QLOAD_REGS ALL_GENERAL_REGS 367#define ALL_QSTORE_REGS \ 368 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1))) 369#endif 370 371/* 372 * ARM immediates for ALU instructions are made of an unsigned 8-bit 373 * right-rotated by an even amount between 0 and 30. 374 * 375 * Return < 0 if @imm cannot be encoded, else the entire imm12 field. 376 */ 377static int encode_imm(uint32_t imm) 378{ 379 uint32_t rot, imm8; 380 381 /* Simple case, no rotation required. */ 382 if ((imm & ~0xff) == 0) { 383 return imm; 384 } 385 386 /* Next, try a simple even shift. */ 387 rot = ctz32(imm) & ~1; 388 imm8 = imm >> rot; 389 rot = 32 - rot; 390 if ((imm8 & ~0xff) == 0) { 391 goto found; 392 } 393 394 /* 395 * Finally, try harder with rotations. 396 * The ctz test above will have taken care of rotates >= 8. 397 */ 398 for (rot = 2; rot < 8; rot += 2) { 399 imm8 = rol32(imm, rot); 400 if ((imm8 & ~0xff) == 0) { 401 goto found; 402 } 403 } 404 /* Fail: imm cannot be encoded. */ 405 return -1; 406 407 found: 408 /* Note that rot is even, and we discard bit 0 by shifting by 7. */ 409 return rot << 7 | imm8; 410} 411 412static int encode_imm_nofail(uint32_t imm) 413{ 414 int ret = encode_imm(imm); 415 tcg_debug_assert(ret >= 0); 416 return ret; 417} 418 419static bool check_fit_imm(uint32_t imm) 420{ 421 return encode_imm(imm) >= 0; 422} 423 424/* Return true if v16 is a valid 16-bit shifted immediate. */ 425static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) 426{ 427 if (v16 == (v16 & 0xff)) { 428 *cmode = 0x8; 429 *imm8 = v16 & 0xff; 430 return true; 431 } else if (v16 == (v16 & 0xff00)) { 432 *cmode = 0xa; 433 *imm8 = v16 >> 8; 434 return true; 435 } 436 return false; 437} 438 439/* Return true if v32 is a valid 32-bit shifted immediate. */ 440static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) 441{ 442 if (v32 == (v32 & 0xff)) { 443 *cmode = 0x0; 444 *imm8 = v32 & 0xff; 445 return true; 446 } else if (v32 == (v32 & 0xff00)) { 447 *cmode = 0x2; 448 *imm8 = (v32 >> 8) & 0xff; 449 return true; 450 } else if (v32 == (v32 & 0xff0000)) { 451 *cmode = 0x4; 452 *imm8 = (v32 >> 16) & 0xff; 453 return true; 454 } else if (v32 == (v32 & 0xff000000)) { 455 *cmode = 0x6; 456 *imm8 = v32 >> 24; 457 return true; 458 } 459 return false; 460} 461 462/* Return true if v32 is a valid 32-bit shifting ones immediate. */ 463static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) 464{ 465 if ((v32 & 0xffff00ff) == 0xff) { 466 *cmode = 0xc; 467 *imm8 = (v32 >> 8) & 0xff; 468 return true; 469 } else if ((v32 & 0xff00ffff) == 0xffff) { 470 *cmode = 0xd; 471 *imm8 = (v32 >> 16) & 0xff; 472 return true; 473 } 474 return false; 475} 476 477/* 478 * Return non-zero if v32 can be formed by MOVI+ORR. 479 * Place the parameters for MOVI in (cmode, imm8). 480 * Return the cmode for ORR; the imm8 can be had via extraction from v32. 481 */ 482static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) 483{ 484 int i; 485 486 for (i = 6; i > 0; i -= 2) { 487 /* Mask out one byte we can add with ORR. */ 488 uint32_t tmp = v32 & ~(0xffu << (i * 4)); 489 if (is_shimm32(tmp, cmode, imm8) || 490 is_soimm32(tmp, cmode, imm8)) { 491 break; 492 } 493 } 494 return i; 495} 496 497/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ 498static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) 499{ 500 if (v32 == deposit32(v32, 16, 16, v32)) { 501 return is_shimm16(v32, cmode, imm8); 502 } else { 503 return is_shimm32(v32, cmode, imm8); 504 } 505} 506 507/* Test if a constant matches the constraint. 508 * TODO: define constraints for: 509 * 510 * ldr/str offset: between -0xfff and 0xfff 511 * ldrh/strh offset: between -0xff and 0xff 512 * mov operand2: values represented with x << (2 * y), x < 0x100 513 * add, sub, eor...: ditto 514 */ 515static bool tcg_target_const_match(int64_t val, TCGType type, int ct) 516{ 517 if (ct & TCG_CT_CONST) { 518 return 1; 519 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { 520 return 1; 521 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) { 522 return 1; 523 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) { 524 return 1; 525 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 526 return 1; 527 } 528 529 switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { 530 case 0: 531 break; 532 case TCG_CT_CONST_ANDI: 533 val = ~val; 534 /* fallthru */ 535 case TCG_CT_CONST_ORRI: 536 if (val == deposit64(val, 32, 32, val)) { 537 int cmode, imm8; 538 return is_shimm1632(val, &cmode, &imm8); 539 } 540 break; 541 default: 542 /* Both bits should not be set for the same insn. */ 543 g_assert_not_reached(); 544 } 545 546 return 0; 547} 548 549static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset) 550{ 551 tcg_out32(s, (cond << 28) | INSN_B | 552 (((offset - 8) >> 2) & 0x00ffffff)); 553} 554 555static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset) 556{ 557 tcg_out32(s, (cond << 28) | 0x0b000000 | 558 (((offset - 8) >> 2) & 0x00ffffff)); 559} 560 561static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 562{ 563 tcg_out32(s, (cond << 28) | 0x012fff30 | rn); 564} 565 566static void tcg_out_blx_imm(TCGContext *s, int32_t offset) 567{ 568 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | 569 (((offset - 8) >> 2) & 0x00ffffff)); 570} 571 572static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc, 573 TCGReg rd, TCGReg rn, TCGReg rm, int shift) 574{ 575 tcg_out32(s, (cond << 28) | (0 << 25) | opc | 576 (rn << 16) | (rd << 12) | shift | rm); 577} 578 579static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm) 580{ 581 /* Simple reg-reg move, optimising out the 'do nothing' case */ 582 if (rd != rm) { 583 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0)); 584 } 585} 586 587static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 588{ 589 tcg_out32(s, (cond << 28) | 0x012fff10 | rn); 590} 591 592static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) 593{ 594 /* 595 * Unless the C portion of QEMU is compiled as thumb, we don't need 596 * true BX semantics; merely a branch to an address held in a register. 597 */ 598 tcg_out_bx_reg(s, cond, rn); 599} 600 601static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, 602 TCGReg rd, TCGReg rn, int im) 603{ 604 tcg_out32(s, (cond << 28) | (1 << 25) | opc | 605 (rn << 16) | (rd << 12) | im); 606} 607 608static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, 609 TCGReg rn, uint16_t mask) 610{ 611 tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); 612} 613 614/* Note that this routine is used for both LDR and LDRH formats, so we do 615 not wish to include an immediate shift at this point. */ 616static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 617 TCGReg rn, TCGReg rm, bool u, bool p, bool w) 618{ 619 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) 620 | (w << 21) | (rn << 16) | (rt << 12) | rm); 621} 622 623static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 624 TCGReg rn, int imm8, bool p, bool w) 625{ 626 bool u = 1; 627 if (imm8 < 0) { 628 imm8 = -imm8; 629 u = 0; 630 } 631 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 632 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); 633} 634 635static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, 636 TCGReg rt, TCGReg rn, int imm12, bool p, bool w) 637{ 638 bool u = 1; 639 if (imm12 < 0) { 640 imm12 = -imm12; 641 u = 0; 642 } 643 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 644 (rn << 16) | (rt << 12) | imm12); 645} 646 647static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, 648 TCGReg rn, int imm12) 649{ 650 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); 651} 652 653static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, 654 TCGReg rn, int imm12) 655{ 656 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); 657} 658 659static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, 660 TCGReg rn, TCGReg rm) 661{ 662 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); 663} 664 665static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, 666 TCGReg rn, TCGReg rm) 667{ 668 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); 669} 670 671static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, 672 TCGReg rn, int imm8) 673{ 674 tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); 675} 676 677static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, 678 TCGReg rn, TCGReg rm) 679{ 680 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); 681} 682 683static void __attribute__((unused)) 684tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) 685{ 686 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); 687} 688 689static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, 690 TCGReg rn, int imm8) 691{ 692 tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); 693} 694 695static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, 696 TCGReg rn, TCGReg rm) 697{ 698 tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); 699} 700 701/* Register pre-increment with base writeback. */ 702static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 703 TCGReg rn, TCGReg rm) 704{ 705 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); 706} 707 708static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 709 TCGReg rn, TCGReg rm) 710{ 711 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); 712} 713 714static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt, 715 TCGReg rn, int imm8) 716{ 717 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); 718} 719 720static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt, 721 TCGReg rn, int imm8) 722{ 723 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); 724} 725 726static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt, 727 TCGReg rn, TCGReg rm) 728{ 729 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); 730} 731 732static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt, 733 TCGReg rn, TCGReg rm) 734{ 735 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); 736} 737 738static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt, 739 TCGReg rn, int imm8) 740{ 741 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); 742} 743 744static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt, 745 TCGReg rn, TCGReg rm) 746{ 747 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); 748} 749 750static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt, 751 TCGReg rn, int imm12) 752{ 753 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); 754} 755 756static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt, 757 TCGReg rn, int imm12) 758{ 759 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); 760} 761 762static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt, 763 TCGReg rn, TCGReg rm) 764{ 765 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); 766} 767 768static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt, 769 TCGReg rn, TCGReg rm) 770{ 771 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); 772} 773 774static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt, 775 TCGReg rn, int imm8) 776{ 777 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); 778} 779 780static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt, 781 TCGReg rn, TCGReg rm) 782{ 783 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); 784} 785 786static void tcg_out_movi_pool(TCGContext *s, ARMCond cond, 787 TCGReg rd, uint32_t arg) 788{ 789 new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); 790 tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); 791} 792 793static void tcg_out_movi32(TCGContext *s, ARMCond cond, 794 TCGReg rd, uint32_t arg) 795{ 796 int imm12, diff, opc, sh1, sh2; 797 uint32_t tt0, tt1, tt2; 798 799 /* Check a single MOV/MVN before anything else. */ 800 imm12 = encode_imm(arg); 801 if (imm12 >= 0) { 802 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12); 803 return; 804 } 805 imm12 = encode_imm(~arg); 806 if (imm12 >= 0) { 807 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12); 808 return; 809 } 810 811 /* Check for a pc-relative address. This will usually be the TB, 812 or within the TB, which is immediately before the code block. */ 813 diff = tcg_pcrel_diff(s, (void *)arg) - 8; 814 if (diff >= 0) { 815 imm12 = encode_imm(diff); 816 if (imm12 >= 0) { 817 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12); 818 return; 819 } 820 } else { 821 imm12 = encode_imm(-diff); 822 if (imm12 >= 0) { 823 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12); 824 return; 825 } 826 } 827 828 /* Use movw + movt. */ 829 if (use_armv7_instructions) { 830 /* movw */ 831 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12) 832 | ((arg << 4) & 0x000f0000) | (arg & 0xfff)); 833 if (arg & 0xffff0000) { 834 /* movt */ 835 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12) 836 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff)); 837 } 838 return; 839 } 840 841 /* Look for sequences of two insns. If we have lots of 1's, we can 842 shorten the sequence by beginning with mvn and then clearing 843 higher bits with eor. */ 844 tt0 = arg; 845 opc = ARITH_MOV; 846 if (ctpop32(arg) > 16) { 847 tt0 = ~arg; 848 opc = ARITH_MVN; 849 } 850 sh1 = ctz32(tt0) & ~1; 851 tt1 = tt0 & ~(0xff << sh1); 852 sh2 = ctz32(tt1) & ~1; 853 tt2 = tt1 & ~(0xff << sh2); 854 if (tt2 == 0) { 855 int rot; 856 857 rot = ((32 - sh1) << 7) & 0xf00; 858 tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); 859 rot = ((32 - sh2) << 7) & 0xf00; 860 tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, 861 ((tt0 >> sh2) & 0xff) | rot); 862 return; 863 } 864 865 /* Otherwise, drop it into the constant pool. */ 866 tcg_out_movi_pool(s, cond, rd, arg); 867} 868 869/* 870 * Emit either the reg,imm or reg,reg form of a data-processing insn. 871 * rhs must satisfy the "rI" constraint. 872 */ 873static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, 874 TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const) 875{ 876 if (rhs_is_const) { 877 tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs)); 878 } else { 879 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 880 } 881} 882 883/* 884 * Emit either the reg,imm or reg,reg form of a data-processing insn. 885 * rhs must satisfy the "rIK" constraint. 886 */ 887static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, 888 ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, 889 bool rhs_is_const) 890{ 891 if (rhs_is_const) { 892 int imm12 = encode_imm(rhs); 893 if (imm12 < 0) { 894 imm12 = encode_imm_nofail(~rhs); 895 opc = opinv; 896 } 897 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 898 } else { 899 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 900 } 901} 902 903static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, 904 ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, 905 bool rhs_is_const) 906{ 907 /* Emit either the reg,imm or reg,reg form of a data-processing insn. 908 * rhs must satisfy the "rIN" constraint. 909 */ 910 if (rhs_is_const) { 911 int imm12 = encode_imm(rhs); 912 if (imm12 < 0) { 913 imm12 = encode_imm_nofail(-rhs); 914 opc = opneg; 915 } 916 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 917 } else { 918 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 919 } 920} 921 922static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, 923 TCGReg rn, TCGReg rm) 924{ 925 /* mul */ 926 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); 927} 928 929static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, 930 TCGReg rd1, TCGReg rn, TCGReg rm) 931{ 932 /* umull */ 933 tcg_out32(s, (cond << 28) | 0x00800090 | 934 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 935} 936 937static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, 938 TCGReg rd1, TCGReg rn, TCGReg rm) 939{ 940 /* smull */ 941 tcg_out32(s, (cond << 28) | 0x00c00090 | 942 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 943} 944 945static void tcg_out_sdiv(TCGContext *s, ARMCond cond, 946 TCGReg rd, TCGReg rn, TCGReg rm) 947{ 948 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 949} 950 951static void tcg_out_udiv(TCGContext *s, ARMCond cond, 952 TCGReg rd, TCGReg rn, TCGReg rm) 953{ 954 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 955} 956 957static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 958{ 959 /* sxtb */ 960 tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); 961} 962 963static void __attribute__((unused)) 964tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 965{ 966 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff); 967} 968 969static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 970{ 971 /* sxth */ 972 tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); 973} 974 975static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 976{ 977 /* uxth */ 978 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); 979} 980 981static void tcg_out_bswap16(TCGContext *s, ARMCond cond, 982 TCGReg rd, TCGReg rn, int flags) 983{ 984 if (flags & TCG_BSWAP_OS) { 985 /* revsh */ 986 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); 987 return; 988 } 989 990 /* rev16 */ 991 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); 992 if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 993 /* uxth */ 994 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); 995 } 996} 997 998static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 999{ 1000 /* rev */ 1001 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); 1002} 1003 1004static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, 1005 TCGArg a1, int ofs, int len, bool const_a1) 1006{ 1007 if (const_a1) { 1008 /* bfi becomes bfc with rn == 15. */ 1009 a1 = 15; 1010 } 1011 /* bfi/bfc */ 1012 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1 1013 | (ofs << 7) | ((ofs + len - 1) << 16)); 1014} 1015 1016static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd, 1017 TCGReg rn, int ofs, int len) 1018{ 1019 /* ubfx */ 1020 tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn 1021 | (ofs << 7) | ((len - 1) << 16)); 1022} 1023 1024static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd, 1025 TCGReg rn, int ofs, int len) 1026{ 1027 /* sbfx */ 1028 tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn 1029 | (ofs << 7) | ((len - 1) << 16)); 1030} 1031 1032static void tcg_out_ld32u(TCGContext *s, ARMCond cond, 1033 TCGReg rd, TCGReg rn, int32_t offset) 1034{ 1035 if (offset > 0xfff || offset < -0xfff) { 1036 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1037 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP); 1038 } else 1039 tcg_out_ld32_12(s, cond, rd, rn, offset); 1040} 1041 1042static void tcg_out_st32(TCGContext *s, ARMCond cond, 1043 TCGReg rd, TCGReg rn, int32_t offset) 1044{ 1045 if (offset > 0xfff || offset < -0xfff) { 1046 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1047 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP); 1048 } else 1049 tcg_out_st32_12(s, cond, rd, rn, offset); 1050} 1051 1052static void tcg_out_ld16u(TCGContext *s, ARMCond cond, 1053 TCGReg rd, TCGReg rn, int32_t offset) 1054{ 1055 if (offset > 0xff || offset < -0xff) { 1056 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1057 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); 1058 } else 1059 tcg_out_ld16u_8(s, cond, rd, rn, offset); 1060} 1061 1062static void tcg_out_ld16s(TCGContext *s, ARMCond cond, 1063 TCGReg rd, TCGReg rn, int32_t offset) 1064{ 1065 if (offset > 0xff || offset < -0xff) { 1066 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1067 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); 1068 } else 1069 tcg_out_ld16s_8(s, cond, rd, rn, offset); 1070} 1071 1072static void tcg_out_st16(TCGContext *s, ARMCond cond, 1073 TCGReg rd, TCGReg rn, int32_t offset) 1074{ 1075 if (offset > 0xff || offset < -0xff) { 1076 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1077 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); 1078 } else 1079 tcg_out_st16_8(s, cond, rd, rn, offset); 1080} 1081 1082static void tcg_out_ld8u(TCGContext *s, ARMCond cond, 1083 TCGReg rd, TCGReg rn, int32_t offset) 1084{ 1085 if (offset > 0xfff || offset < -0xfff) { 1086 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1087 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); 1088 } else 1089 tcg_out_ld8_12(s, cond, rd, rn, offset); 1090} 1091 1092static void tcg_out_ld8s(TCGContext *s, ARMCond cond, 1093 TCGReg rd, TCGReg rn, int32_t offset) 1094{ 1095 if (offset > 0xff || offset < -0xff) { 1096 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1097 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); 1098 } else 1099 tcg_out_ld8s_8(s, cond, rd, rn, offset); 1100} 1101 1102static void tcg_out_st8(TCGContext *s, ARMCond cond, 1103 TCGReg rd, TCGReg rn, int32_t offset) 1104{ 1105 if (offset > 0xfff || offset < -0xfff) { 1106 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1107 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); 1108 } else 1109 tcg_out_st8_12(s, cond, rd, rn, offset); 1110} 1111 1112/* 1113 * The _goto case is normally between TBs within the same code buffer, and 1114 * with the code buffer limited to 16MB we wouldn't need the long case. 1115 * But we also use it for the tail-call to the qemu_ld/st helpers, which does. 1116 */ 1117static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr) 1118{ 1119 intptr_t addri = (intptr_t)addr; 1120 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1121 bool arm_mode = !(addri & 1); 1122 1123 if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { 1124 tcg_out_b_imm(s, cond, disp); 1125 return; 1126 } 1127 1128 /* LDR is interworking from v5t. */ 1129 tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); 1130} 1131 1132/* 1133 * The call case is mostly used for helpers - so it's not unreasonable 1134 * for them to be beyond branch range. 1135 */ 1136static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr) 1137{ 1138 intptr_t addri = (intptr_t)addr; 1139 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1140 bool arm_mode = !(addri & 1); 1141 1142 if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { 1143 if (arm_mode) { 1144 tcg_out_bl_imm(s, COND_AL, disp); 1145 } else { 1146 tcg_out_blx_imm(s, disp); 1147 } 1148 return; 1149 } 1150 1151 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); 1152 tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); 1153} 1154 1155static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr, 1156 const TCGHelperInfo *info) 1157{ 1158 tcg_out_call_int(s, addr); 1159} 1160 1161static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) 1162{ 1163 if (l->has_value) { 1164 tcg_out_goto(s, cond, l->u.value_ptr); 1165 } else { 1166 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); 1167 tcg_out_b_imm(s, cond, 0); 1168 } 1169} 1170 1171static void tcg_out_mb(TCGContext *s, TCGArg a0) 1172{ 1173 if (use_armv7_instructions) { 1174 tcg_out32(s, INSN_DMB_ISH); 1175 } else { 1176 tcg_out32(s, INSN_DMB_MCR); 1177 } 1178} 1179 1180static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, 1181 const int *const_args) 1182{ 1183 TCGReg al = args[0]; 1184 TCGReg ah = args[1]; 1185 TCGArg bl = args[2]; 1186 TCGArg bh = args[3]; 1187 TCGCond cond = args[4]; 1188 int const_bl = const_args[2]; 1189 int const_bh = const_args[3]; 1190 1191 switch (cond) { 1192 case TCG_COND_EQ: 1193 case TCG_COND_NE: 1194 case TCG_COND_LTU: 1195 case TCG_COND_LEU: 1196 case TCG_COND_GTU: 1197 case TCG_COND_GEU: 1198 /* We perform a conditional comparision. If the high half is 1199 equal, then overwrite the flags with the comparison of the 1200 low half. The resulting flags cover the whole. */ 1201 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh); 1202 tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); 1203 return cond; 1204 1205 case TCG_COND_LT: 1206 case TCG_COND_GE: 1207 /* We perform a double-word subtraction and examine the result. 1208 We do not actually need the result of the subtract, so the 1209 low part "subtract" is a compare. For the high half we have 1210 no choice but to compute into a temporary. */ 1211 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl); 1212 tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR, 1213 TCG_REG_TMP, ah, bh, const_bh); 1214 return cond; 1215 1216 case TCG_COND_LE: 1217 case TCG_COND_GT: 1218 /* Similar, but with swapped arguments, via reversed subtract. */ 1219 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, 1220 TCG_REG_TMP, al, bl, const_bl); 1221 tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, 1222 TCG_REG_TMP, ah, bh, const_bh); 1223 return tcg_swap_cond(cond); 1224 1225 default: 1226 g_assert_not_reached(); 1227 } 1228} 1229 1230/* 1231 * Note that TCGReg references Q-registers. 1232 * Q-regno = 2 * D-regno, so shift left by 1 whlie inserting. 1233 */ 1234static uint32_t encode_vd(TCGReg rd) 1235{ 1236 tcg_debug_assert(rd >= TCG_REG_Q0); 1237 return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); 1238} 1239 1240static uint32_t encode_vn(TCGReg rn) 1241{ 1242 tcg_debug_assert(rn >= TCG_REG_Q0); 1243 return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17); 1244} 1245 1246static uint32_t encode_vm(TCGReg rm) 1247{ 1248 tcg_debug_assert(rm >= TCG_REG_Q0); 1249 return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); 1250} 1251 1252static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece, 1253 TCGReg d, TCGReg m) 1254{ 1255 tcg_out32(s, insn | (vece << 18) | (q << 6) | 1256 encode_vd(d) | encode_vm(m)); 1257} 1258 1259static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, 1260 TCGReg d, TCGReg n, TCGReg m) 1261{ 1262 tcg_out32(s, insn | (vece << 20) | (q << 6) | 1263 encode_vd(d) | encode_vn(n) | encode_vm(m)); 1264} 1265 1266static void tcg_out_vmovi(TCGContext *s, TCGReg rd, 1267 int q, int op, int cmode, uint8_t imm8) 1268{ 1269 tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5) 1270 | (cmode << 8) | extract32(imm8, 0, 4) 1271 | (extract32(imm8, 4, 3) << 16) 1272 | (extract32(imm8, 7, 1) << 24)); 1273} 1274 1275static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q, 1276 TCGReg rd, TCGReg rm, int l_imm6) 1277{ 1278 tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) | 1279 (extract32(l_imm6, 6, 1) << 7) | 1280 (extract32(l_imm6, 0, 6) << 16)); 1281} 1282 1283static void tcg_out_vldst(TCGContext *s, ARMInsn insn, 1284 TCGReg rd, TCGReg rn, int offset) 1285{ 1286 if (offset != 0) { 1287 if (check_fit_imm(offset) || check_fit_imm(-offset)) { 1288 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1289 TCG_REG_TMP, rn, offset, true); 1290 } else { 1291 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); 1292 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1293 TCG_REG_TMP, TCG_REG_TMP, rn, 0); 1294 } 1295 rn = TCG_REG_TMP; 1296 } 1297 tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); 1298} 1299 1300#ifdef CONFIG_SOFTMMU 1301/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, 1302 * int mmu_idx, uintptr_t ra) 1303 */ 1304static void * const qemu_ld_helpers[MO_SSIZE + 1] = { 1305 [MO_UB] = helper_ret_ldub_mmu, 1306 [MO_SB] = helper_ret_ldsb_mmu, 1307#if HOST_BIG_ENDIAN 1308 [MO_UW] = helper_be_lduw_mmu, 1309 [MO_UL] = helper_be_ldul_mmu, 1310 [MO_UQ] = helper_be_ldq_mmu, 1311 [MO_SW] = helper_be_ldsw_mmu, 1312 [MO_SL] = helper_be_ldul_mmu, 1313#else 1314 [MO_UW] = helper_le_lduw_mmu, 1315 [MO_UL] = helper_le_ldul_mmu, 1316 [MO_UQ] = helper_le_ldq_mmu, 1317 [MO_SW] = helper_le_ldsw_mmu, 1318 [MO_SL] = helper_le_ldul_mmu, 1319#endif 1320}; 1321 1322/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, 1323 * uintxx_t val, int mmu_idx, uintptr_t ra) 1324 */ 1325static void * const qemu_st_helpers[MO_SIZE + 1] = { 1326 [MO_8] = helper_ret_stb_mmu, 1327#if HOST_BIG_ENDIAN 1328 [MO_16] = helper_be_stw_mmu, 1329 [MO_32] = helper_be_stl_mmu, 1330 [MO_64] = helper_be_stq_mmu, 1331#else 1332 [MO_16] = helper_le_stw_mmu, 1333 [MO_32] = helper_le_stl_mmu, 1334 [MO_64] = helper_le_stq_mmu, 1335#endif 1336}; 1337 1338/* Helper routines for marshalling helper function arguments into 1339 * the correct registers and stack. 1340 * argreg is where we want to put this argument, arg is the argument itself. 1341 * Return value is the updated argreg ready for the next call. 1342 * Note that argreg 0..3 is real registers, 4+ on stack. 1343 * 1344 * We provide routines for arguments which are: immediate, 32 bit 1345 * value in register, 16 and 8 bit values in register (which must be zero 1346 * extended before use) and 64 bit value in a lo:hi register pair. 1347 */ 1348#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \ 1349static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \ 1350{ \ 1351 if (argreg < 4) { \ 1352 MOV_ARG(s, COND_AL, argreg, arg); \ 1353 } else { \ 1354 int ofs = (argreg - 4) * 4; \ 1355 EXT_ARG; \ 1356 tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \ 1357 tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \ 1358 } \ 1359 return argreg + 1; \ 1360} 1361 1362DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32, 1363 (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) 1364DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u, 1365 (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) 1366DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u, 1367 (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) 1368DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, ) 1369 1370static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, 1371 TCGReg arglo, TCGReg arghi) 1372{ 1373 /* 64 bit arguments must go in even/odd register pairs 1374 * and in 8-aligned stack slots. 1375 */ 1376 if (argreg & 1) { 1377 argreg++; 1378 } 1379 if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) { 1380 tcg_out_strd_8(s, COND_AL, arglo, 1381 TCG_REG_CALL_STACK, (argreg - 4) * 4); 1382 return argreg + 2; 1383 } else { 1384 argreg = tcg_out_arg_reg32(s, argreg, arglo); 1385 argreg = tcg_out_arg_reg32(s, argreg, arghi); 1386 return argreg; 1387 } 1388} 1389 1390#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) 1391 1392/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ 1393QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1394QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); 1395 1396/* These offsets are built into the LDRD below. */ 1397QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1398QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); 1399 1400/* Load and compare a TLB entry, leaving the flags set. Returns the register 1401 containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ 1402 1403static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, 1404 MemOp opc, int mem_index, bool is_load) 1405{ 1406 int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) 1407 : offsetof(CPUTLBEntry, addr_write)); 1408 int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1409 unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; 1410 unsigned a_mask = (1 << get_alignment_bits(opc)) - 1; 1411 TCGReg t_addr; 1412 1413 /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ 1414 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); 1415 1416 /* Extract the tlb index from the address into R0. */ 1417 tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, 1418 SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); 1419 1420 /* 1421 * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1422 * Load the tlb comparator into R2/R3 and the fast path addend into R1. 1423 */ 1424 if (cmp_off == 0) { 1425 if (TARGET_LONG_BITS == 64) { 1426 tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1427 } else { 1428 tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1429 } 1430 } else { 1431 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1432 TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); 1433 if (TARGET_LONG_BITS == 64) { 1434 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1435 } else { 1436 tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1437 } 1438 } 1439 1440 /* Load the tlb addend. */ 1441 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, 1442 offsetof(CPUTLBEntry, addend)); 1443 1444 /* 1445 * Check alignment, check comparators. 1446 * Do this in 2-4 insns. Use MOVW for v7, if possible, 1447 * to reduce the number of sequential conditional instructions. 1448 * Almost all guests have at least 4k pages, which means that we need 1449 * to clear at least 9 bits even for an 8-byte memory, which means it 1450 * isn't worth checking for an immediate operand for BIC. 1451 * 1452 * For unaligned accesses, test the page of the last unit of alignment. 1453 * This leaves the least significant alignment bits unchanged, and of 1454 * course must be zero. 1455 */ 1456 t_addr = addrlo; 1457 if (a_mask < s_mask) { 1458 t_addr = TCG_REG_R0; 1459 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, 1460 addrlo, s_mask - a_mask); 1461 } 1462 if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { 1463 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); 1464 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1465 t_addr, TCG_REG_TMP, 0); 1466 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); 1467 } else { 1468 if (a_mask) { 1469 tcg_debug_assert(a_mask <= 0xff); 1470 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1471 } 1472 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, 1473 SHIFT_IMM_LSR(TARGET_PAGE_BITS)); 1474 tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 1475 0, TCG_REG_R2, TCG_REG_TMP, 1476 SHIFT_IMM_LSL(TARGET_PAGE_BITS)); 1477 } 1478 1479 if (TARGET_LONG_BITS == 64) { 1480 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); 1481 } 1482 1483 return TCG_REG_R1; 1484} 1485 1486/* Record the context of a call to the out of line helper code for the slow 1487 path for a load or store, so that we can later generate the correct 1488 helper code. */ 1489static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, 1490 TCGReg datalo, TCGReg datahi, TCGReg addrlo, 1491 TCGReg addrhi, tcg_insn_unit *raddr, 1492 tcg_insn_unit *label_ptr) 1493{ 1494 TCGLabelQemuLdst *label = new_ldst_label(s); 1495 1496 label->is_ld = is_ld; 1497 label->oi = oi; 1498 label->datalo_reg = datalo; 1499 label->datahi_reg = datahi; 1500 label->addrlo_reg = addrlo; 1501 label->addrhi_reg = addrhi; 1502 label->raddr = tcg_splitwx_to_rx(raddr); 1503 label->label_ptr[0] = label_ptr; 1504} 1505 1506static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1507{ 1508 TCGReg argreg, datalo, datahi; 1509 MemOpIdx oi = lb->oi; 1510 MemOp opc = get_memop(oi); 1511 1512 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1513 return false; 1514 } 1515 1516 argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0); 1517 if (TARGET_LONG_BITS == 64) { 1518 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); 1519 } else { 1520 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); 1521 } 1522 argreg = tcg_out_arg_imm32(s, argreg, oi); 1523 argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); 1524 1525 /* Use the canonical unsigned helpers and minimize icache usage. */ 1526 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); 1527 1528 datalo = lb->datalo_reg; 1529 datahi = lb->datahi_reg; 1530 switch (opc & MO_SSIZE) { 1531 case MO_SB: 1532 tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0); 1533 break; 1534 case MO_SW: 1535 tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0); 1536 break; 1537 default: 1538 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); 1539 break; 1540 case MO_UQ: 1541 if (datalo != TCG_REG_R1) { 1542 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); 1543 tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); 1544 } else if (datahi != TCG_REG_R0) { 1545 tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); 1546 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); 1547 } else { 1548 tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0); 1549 tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); 1550 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP); 1551 } 1552 break; 1553 } 1554 1555 tcg_out_goto(s, COND_AL, lb->raddr); 1556 return true; 1557} 1558 1559static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1560{ 1561 TCGReg argreg, datalo, datahi; 1562 MemOpIdx oi = lb->oi; 1563 MemOp opc = get_memop(oi); 1564 1565 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1566 return false; 1567 } 1568 1569 argreg = TCG_REG_R0; 1570 argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0); 1571 if (TARGET_LONG_BITS == 64) { 1572 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); 1573 } else { 1574 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); 1575 } 1576 1577 datalo = lb->datalo_reg; 1578 datahi = lb->datahi_reg; 1579 switch (opc & MO_SIZE) { 1580 case MO_8: 1581 argreg = tcg_out_arg_reg8(s, argreg, datalo); 1582 break; 1583 case MO_16: 1584 argreg = tcg_out_arg_reg16(s, argreg, datalo); 1585 break; 1586 case MO_32: 1587 default: 1588 argreg = tcg_out_arg_reg32(s, argreg, datalo); 1589 break; 1590 case MO_64: 1591 argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi); 1592 break; 1593 } 1594 1595 argreg = tcg_out_arg_imm32(s, argreg, oi); 1596 argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); 1597 1598 /* Tail-call to the helper, which will return to the fast path. */ 1599 tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); 1600 return true; 1601} 1602#else 1603 1604static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, 1605 TCGReg addrhi, unsigned a_bits) 1606{ 1607 unsigned a_mask = (1 << a_bits) - 1; 1608 TCGLabelQemuLdst *label = new_ldst_label(s); 1609 1610 label->is_ld = is_ld; 1611 label->addrlo_reg = addrlo; 1612 label->addrhi_reg = addrhi; 1613 1614 /* We are expecting a_bits to max out at 7, and can easily support 8. */ 1615 tcg_debug_assert(a_mask <= 0xff); 1616 /* tst addr, #mask */ 1617 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1618 1619 /* blne slow_path */ 1620 label->label_ptr[0] = s->code_ptr; 1621 tcg_out_bl_imm(s, COND_NE, 0); 1622 1623 label->raddr = tcg_splitwx_to_rx(s->code_ptr); 1624} 1625 1626static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) 1627{ 1628 if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1629 return false; 1630 } 1631 1632 if (TARGET_LONG_BITS == 64) { 1633 /* 64-bit target address is aligned into R2:R3. */ 1634 if (l->addrhi_reg != TCG_REG_R2) { 1635 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); 1636 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); 1637 } else if (l->addrlo_reg != TCG_REG_R3) { 1638 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); 1639 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); 1640 } else { 1641 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2); 1642 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3); 1643 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1); 1644 } 1645 } else { 1646 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg); 1647 } 1648 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0); 1649 1650 /* 1651 * Tail call to the helper, with the return address back inline, 1652 * just for the clarity of the debugging traceback -- the helper 1653 * cannot return. We have used BLNE to arrive here, so LR is 1654 * already set. 1655 */ 1656 tcg_out_goto(s, COND_AL, (const void *) 1657 (l->is_ld ? helper_unaligned_ld : helper_unaligned_st)); 1658 return true; 1659} 1660 1661static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1662{ 1663 return tcg_out_fail_alignment(s, l); 1664} 1665 1666static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1667{ 1668 return tcg_out_fail_alignment(s, l); 1669} 1670#endif /* SOFTMMU */ 1671 1672static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, 1673 TCGReg datalo, TCGReg datahi, 1674 TCGReg addrlo, TCGReg addend, 1675 bool scratch_addend) 1676{ 1677 /* Byte swapping is left to middle-end expansion. */ 1678 tcg_debug_assert((opc & MO_BSWAP) == 0); 1679 1680 switch (opc & MO_SSIZE) { 1681 case MO_UB: 1682 tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend); 1683 break; 1684 case MO_SB: 1685 tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend); 1686 break; 1687 case MO_UW: 1688 tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); 1689 break; 1690 case MO_SW: 1691 tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend); 1692 break; 1693 case MO_UL: 1694 tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); 1695 break; 1696 case MO_UQ: 1697 /* We used pair allocation for datalo, so already should be aligned. */ 1698 tcg_debug_assert((datalo & 1) == 0); 1699 tcg_debug_assert(datahi == datalo + 1); 1700 /* LDRD requires alignment; double-check that. */ 1701 if (get_alignment_bits(opc) >= MO_64) { 1702 /* 1703 * Rm (the second address op) must not overlap Rt or Rt + 1. 1704 * Since datalo is aligned, we can simplify the test via alignment. 1705 * Flip the two address arguments if that works. 1706 */ 1707 if ((addend & ~1) != datalo) { 1708 tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); 1709 break; 1710 } 1711 if ((addrlo & ~1) != datalo) { 1712 tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo); 1713 break; 1714 } 1715 } 1716 if (scratch_addend) { 1717 tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo); 1718 tcg_out_ld32_12(s, COND_AL, datahi, addend, 4); 1719 } else { 1720 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP, 1721 addend, addrlo, SHIFT_IMM_LSL(0)); 1722 tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0); 1723 tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4); 1724 } 1725 break; 1726 default: 1727 g_assert_not_reached(); 1728 } 1729} 1730 1731#ifndef CONFIG_SOFTMMU 1732static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1733 TCGReg datahi, TCGReg addrlo) 1734{ 1735 /* Byte swapping is left to middle-end expansion. */ 1736 tcg_debug_assert((opc & MO_BSWAP) == 0); 1737 1738 switch (opc & MO_SSIZE) { 1739 case MO_UB: 1740 tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0); 1741 break; 1742 case MO_SB: 1743 tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0); 1744 break; 1745 case MO_UW: 1746 tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); 1747 break; 1748 case MO_SW: 1749 tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0); 1750 break; 1751 case MO_UL: 1752 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); 1753 break; 1754 case MO_UQ: 1755 /* We used pair allocation for datalo, so already should be aligned. */ 1756 tcg_debug_assert((datalo & 1) == 0); 1757 tcg_debug_assert(datahi == datalo + 1); 1758 /* LDRD requires alignment; double-check that. */ 1759 if (get_alignment_bits(opc) >= MO_64) { 1760 tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); 1761 } else if (datalo == addrlo) { 1762 tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); 1763 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); 1764 } else { 1765 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); 1766 tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); 1767 } 1768 break; 1769 default: 1770 g_assert_not_reached(); 1771 } 1772} 1773#endif 1774 1775static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) 1776{ 1777 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); 1778 MemOpIdx oi; 1779 MemOp opc; 1780#ifdef CONFIG_SOFTMMU 1781 int mem_index; 1782 TCGReg addend; 1783 tcg_insn_unit *label_ptr; 1784#else 1785 unsigned a_bits; 1786#endif 1787 1788 datalo = *args++; 1789 datahi = (is64 ? *args++ : 0); 1790 addrlo = *args++; 1791 addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); 1792 oi = *args++; 1793 opc = get_memop(oi); 1794 1795#ifdef CONFIG_SOFTMMU 1796 mem_index = get_mmuidx(oi); 1797 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1); 1798 1799 /* This a conditional BL only to load a pointer within this opcode into LR 1800 for the slow path. We will not be using the value for a tail call. */ 1801 label_ptr = s->code_ptr; 1802 tcg_out_bl_imm(s, COND_NE, 0); 1803 1804 tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); 1805 1806 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, 1807 s->code_ptr, label_ptr); 1808#else /* !CONFIG_SOFTMMU */ 1809 a_bits = get_alignment_bits(opc); 1810 if (a_bits) { 1811 tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); 1812 } 1813 if (guest_base) { 1814 tcg_out_qemu_ld_index(s, opc, datalo, datahi, 1815 addrlo, TCG_REG_GUEST_BASE, false); 1816 } else { 1817 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); 1818 } 1819#endif 1820} 1821 1822static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, 1823 TCGReg datalo, TCGReg datahi, 1824 TCGReg addrlo, TCGReg addend, 1825 bool scratch_addend) 1826{ 1827 /* Byte swapping is left to middle-end expansion. */ 1828 tcg_debug_assert((opc & MO_BSWAP) == 0); 1829 1830 switch (opc & MO_SIZE) { 1831 case MO_8: 1832 tcg_out_st8_r(s, cond, datalo, addrlo, addend); 1833 break; 1834 case MO_16: 1835 tcg_out_st16_r(s, cond, datalo, addrlo, addend); 1836 break; 1837 case MO_32: 1838 tcg_out_st32_r(s, cond, datalo, addrlo, addend); 1839 break; 1840 case MO_64: 1841 /* We used pair allocation for datalo, so already should be aligned. */ 1842 tcg_debug_assert((datalo & 1) == 0); 1843 tcg_debug_assert(datahi == datalo + 1); 1844 /* STRD requires alignment; double-check that. */ 1845 if (get_alignment_bits(opc) >= MO_64) { 1846 tcg_out_strd_r(s, cond, datalo, addrlo, addend); 1847 } else if (scratch_addend) { 1848 tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); 1849 tcg_out_st32_12(s, cond, datahi, addend, 4); 1850 } else { 1851 tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP, 1852 addend, addrlo, SHIFT_IMM_LSL(0)); 1853 tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0); 1854 tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4); 1855 } 1856 break; 1857 default: 1858 g_assert_not_reached(); 1859 } 1860} 1861 1862#ifndef CONFIG_SOFTMMU 1863static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1864 TCGReg datahi, TCGReg addrlo) 1865{ 1866 /* Byte swapping is left to middle-end expansion. */ 1867 tcg_debug_assert((opc & MO_BSWAP) == 0); 1868 1869 switch (opc & MO_SIZE) { 1870 case MO_8: 1871 tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0); 1872 break; 1873 case MO_16: 1874 tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0); 1875 break; 1876 case MO_32: 1877 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); 1878 break; 1879 case MO_64: 1880 /* We used pair allocation for datalo, so already should be aligned. */ 1881 tcg_debug_assert((datalo & 1) == 0); 1882 tcg_debug_assert(datahi == datalo + 1); 1883 /* STRD requires alignment; double-check that. */ 1884 if (get_alignment_bits(opc) >= MO_64) { 1885 tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); 1886 } else { 1887 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); 1888 tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4); 1889 } 1890 break; 1891 default: 1892 g_assert_not_reached(); 1893 } 1894} 1895#endif 1896 1897static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) 1898{ 1899 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); 1900 MemOpIdx oi; 1901 MemOp opc; 1902#ifdef CONFIG_SOFTMMU 1903 int mem_index; 1904 TCGReg addend; 1905 tcg_insn_unit *label_ptr; 1906#else 1907 unsigned a_bits; 1908#endif 1909 1910 datalo = *args++; 1911 datahi = (is64 ? *args++ : 0); 1912 addrlo = *args++; 1913 addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); 1914 oi = *args++; 1915 opc = get_memop(oi); 1916 1917#ifdef CONFIG_SOFTMMU 1918 mem_index = get_mmuidx(oi); 1919 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); 1920 1921 tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, 1922 addrlo, addend, true); 1923 1924 /* The conditional call must come last, as we're going to return here. */ 1925 label_ptr = s->code_ptr; 1926 tcg_out_bl_imm(s, COND_NE, 0); 1927 1928 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, 1929 s->code_ptr, label_ptr); 1930#else /* !CONFIG_SOFTMMU */ 1931 a_bits = get_alignment_bits(opc); 1932 if (a_bits) { 1933 tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); 1934 } 1935 if (guest_base) { 1936 tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, 1937 addrlo, TCG_REG_GUEST_BASE, false); 1938 } else { 1939 tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); 1940 } 1941#endif 1942} 1943 1944static void tcg_out_epilogue(TCGContext *s); 1945 1946static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) 1947{ 1948 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg); 1949 tcg_out_epilogue(s); 1950} 1951 1952static void tcg_out_goto_tb(TCGContext *s, int which) 1953{ 1954 uintptr_t i_addr; 1955 intptr_t i_disp; 1956 1957 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1958 set_jmp_insn_offset(s, which); 1959 tcg_out32(s, INSN_NOP); 1960 1961 /* When branch is out of range, fall through to indirect. */ 1962 i_addr = get_jmp_target_addr(s, which); 1963 i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8; 1964 tcg_debug_assert(i_disp < 0); 1965 if (i_disp >= -0xfff) { 1966 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp); 1967 } else { 1968 /* 1969 * The TB is close, but outside the 12 bits addressable by 1970 * the load. We can extend this to 20 bits with a sub of a 1971 * shifted immediate from pc. 1972 */ 1973 int h = -i_disp; 1974 int l = h & 0xfff; 1975 1976 h = encode_imm_nofail(h - l); 1977 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h); 1978 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l); 1979 } 1980 set_jmp_reset_offset(s, which); 1981} 1982 1983void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1984 uintptr_t jmp_rx, uintptr_t jmp_rw) 1985{ 1986 uintptr_t addr = tb->jmp_target_addr[n]; 1987 ptrdiff_t offset = addr - (jmp_rx + 8); 1988 tcg_insn_unit insn; 1989 1990 /* Either directly branch, or fall through to indirect branch. */ 1991 if (offset == sextract64(offset, 0, 26)) { 1992 /* B <addr> */ 1993 insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2); 1994 } else { 1995 insn = INSN_NOP; 1996 } 1997 1998 qatomic_set((uint32_t *)jmp_rw, insn); 1999 flush_idcache_range(jmp_rx, jmp_rw, 4); 2000} 2001 2002static void tcg_out_op(TCGContext *s, TCGOpcode opc, 2003 const TCGArg args[TCG_MAX_OP_ARGS], 2004 const int const_args[TCG_MAX_OP_ARGS]) 2005{ 2006 TCGArg a0, a1, a2, a3, a4, a5; 2007 int c; 2008 2009 switch (opc) { 2010 case INDEX_op_goto_ptr: 2011 tcg_out_b_reg(s, COND_AL, args[0]); 2012 break; 2013 case INDEX_op_br: 2014 tcg_out_goto_label(s, COND_AL, arg_label(args[0])); 2015 break; 2016 2017 case INDEX_op_ld8u_i32: 2018 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); 2019 break; 2020 case INDEX_op_ld8s_i32: 2021 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); 2022 break; 2023 case INDEX_op_ld16u_i32: 2024 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); 2025 break; 2026 case INDEX_op_ld16s_i32: 2027 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); 2028 break; 2029 case INDEX_op_ld_i32: 2030 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); 2031 break; 2032 case INDEX_op_st8_i32: 2033 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); 2034 break; 2035 case INDEX_op_st16_i32: 2036 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); 2037 break; 2038 case INDEX_op_st_i32: 2039 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); 2040 break; 2041 2042 case INDEX_op_movcond_i32: 2043 /* Constraints mean that v2 is always in the same register as dest, 2044 * so we only need to do "if condition passed, move v1 to dest". 2045 */ 2046 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, 2047 args[1], args[2], const_args[2]); 2048 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV, 2049 ARITH_MVN, args[0], 0, args[3], const_args[3]); 2050 break; 2051 case INDEX_op_add_i32: 2052 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 2053 args[0], args[1], args[2], const_args[2]); 2054 break; 2055 case INDEX_op_sub_i32: 2056 if (const_args[1]) { 2057 if (const_args[2]) { 2058 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]); 2059 } else { 2060 tcg_out_dat_rI(s, COND_AL, ARITH_RSB, 2061 args[0], args[2], args[1], 1); 2062 } 2063 } else { 2064 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD, 2065 args[0], args[1], args[2], const_args[2]); 2066 } 2067 break; 2068 case INDEX_op_and_i32: 2069 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC, 2070 args[0], args[1], args[2], const_args[2]); 2071 break; 2072 case INDEX_op_andc_i32: 2073 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND, 2074 args[0], args[1], args[2], const_args[2]); 2075 break; 2076 case INDEX_op_or_i32: 2077 c = ARITH_ORR; 2078 goto gen_arith; 2079 case INDEX_op_xor_i32: 2080 c = ARITH_EOR; 2081 /* Fall through. */ 2082 gen_arith: 2083 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]); 2084 break; 2085 case INDEX_op_add2_i32: 2086 a0 = args[0], a1 = args[1], a2 = args[2]; 2087 a3 = args[3], a4 = args[4], a5 = args[5]; 2088 if (a0 == a3 || (a0 == a5 && !const_args[5])) { 2089 a0 = TCG_REG_TMP; 2090 } 2091 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, 2092 a0, a2, a4, const_args[4]); 2093 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, 2094 a1, a3, a5, const_args[5]); 2095 tcg_out_mov_reg(s, COND_AL, args[0], a0); 2096 break; 2097 case INDEX_op_sub2_i32: 2098 a0 = args[0], a1 = args[1], a2 = args[2]; 2099 a3 = args[3], a4 = args[4], a5 = args[5]; 2100 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { 2101 a0 = TCG_REG_TMP; 2102 } 2103 if (const_args[2]) { 2104 if (const_args[4]) { 2105 tcg_out_movi32(s, COND_AL, a0, a4); 2106 a4 = a0; 2107 } 2108 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); 2109 } else { 2110 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, 2111 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); 2112 } 2113 if (const_args[3]) { 2114 if (const_args[5]) { 2115 tcg_out_movi32(s, COND_AL, a1, a5); 2116 a5 = a1; 2117 } 2118 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); 2119 } else { 2120 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, 2121 a1, a3, a5, const_args[5]); 2122 } 2123 tcg_out_mov_reg(s, COND_AL, args[0], a0); 2124 break; 2125 case INDEX_op_neg_i32: 2126 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); 2127 break; 2128 case INDEX_op_not_i32: 2129 tcg_out_dat_reg(s, COND_AL, 2130 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); 2131 break; 2132 case INDEX_op_mul_i32: 2133 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); 2134 break; 2135 case INDEX_op_mulu2_i32: 2136 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); 2137 break; 2138 case INDEX_op_muls2_i32: 2139 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]); 2140 break; 2141 /* XXX: Perhaps args[2] & 0x1f is wrong */ 2142 case INDEX_op_shl_i32: 2143 c = const_args[2] ? 2144 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); 2145 goto gen_shift32; 2146 case INDEX_op_shr_i32: 2147 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : 2148 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); 2149 goto gen_shift32; 2150 case INDEX_op_sar_i32: 2151 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : 2152 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); 2153 goto gen_shift32; 2154 case INDEX_op_rotr_i32: 2155 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) : 2156 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]); 2157 /* Fall through. */ 2158 gen_shift32: 2159 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); 2160 break; 2161 2162 case INDEX_op_rotl_i32: 2163 if (const_args[2]) { 2164 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 2165 ((0x20 - args[2]) & 0x1f) ? 2166 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) : 2167 SHIFT_IMM_LSL(0)); 2168 } else { 2169 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20); 2170 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 2171 SHIFT_REG_ROR(TCG_REG_TMP)); 2172 } 2173 break; 2174 2175 case INDEX_op_ctz_i32: 2176 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); 2177 a1 = TCG_REG_TMP; 2178 goto do_clz; 2179 2180 case INDEX_op_clz_i32: 2181 a1 = args[1]; 2182 do_clz: 2183 a0 = args[0]; 2184 a2 = args[2]; 2185 c = const_args[2]; 2186 if (c && a2 == 32) { 2187 tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); 2188 break; 2189 } 2190 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); 2191 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); 2192 if (c || a0 != a2) { 2193 tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); 2194 } 2195 break; 2196 2197 case INDEX_op_brcond_i32: 2198 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, 2199 args[0], args[1], const_args[1]); 2200 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], 2201 arg_label(args[3])); 2202 break; 2203 case INDEX_op_setcond_i32: 2204 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, 2205 args[1], args[2], const_args[2]); 2206 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]], 2207 ARITH_MOV, args[0], 0, 1); 2208 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], 2209 ARITH_MOV, args[0], 0, 0); 2210 break; 2211 2212 case INDEX_op_brcond2_i32: 2213 c = tcg_out_cmp2(s, args, const_args); 2214 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); 2215 break; 2216 case INDEX_op_setcond2_i32: 2217 c = tcg_out_cmp2(s, args + 1, const_args + 1); 2218 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); 2219 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2220 ARITH_MOV, args[0], 0, 0); 2221 break; 2222 2223 case INDEX_op_qemu_ld_i32: 2224 tcg_out_qemu_ld(s, args, 0); 2225 break; 2226 case INDEX_op_qemu_ld_i64: 2227 tcg_out_qemu_ld(s, args, 1); 2228 break; 2229 case INDEX_op_qemu_st_i32: 2230 tcg_out_qemu_st(s, args, 0); 2231 break; 2232 case INDEX_op_qemu_st_i64: 2233 tcg_out_qemu_st(s, args, 1); 2234 break; 2235 2236 case INDEX_op_bswap16_i32: 2237 tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]); 2238 break; 2239 case INDEX_op_bswap32_i32: 2240 tcg_out_bswap32(s, COND_AL, args[0], args[1]); 2241 break; 2242 2243 case INDEX_op_ext8s_i32: 2244 tcg_out_ext8s(s, COND_AL, args[0], args[1]); 2245 break; 2246 case INDEX_op_ext16s_i32: 2247 tcg_out_ext16s(s, COND_AL, args[0], args[1]); 2248 break; 2249 case INDEX_op_ext16u_i32: 2250 tcg_out_ext16u(s, COND_AL, args[0], args[1]); 2251 break; 2252 2253 case INDEX_op_deposit_i32: 2254 tcg_out_deposit(s, COND_AL, args[0], args[2], 2255 args[3], args[4], const_args[2]); 2256 break; 2257 case INDEX_op_extract_i32: 2258 tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); 2259 break; 2260 case INDEX_op_sextract_i32: 2261 tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); 2262 break; 2263 case INDEX_op_extract2_i32: 2264 /* ??? These optimization vs zero should be generic. */ 2265 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ 2266 if (const_args[1]) { 2267 if (const_args[2]) { 2268 tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); 2269 } else { 2270 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2271 args[2], SHIFT_IMM_LSL(32 - args[3])); 2272 } 2273 } else if (const_args[2]) { 2274 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2275 args[1], SHIFT_IMM_LSR(args[3])); 2276 } else { 2277 /* We can do extract2 in 2 insns, vs the 3 required otherwise. */ 2278 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, 2279 args[2], SHIFT_IMM_LSL(32 - args[3])); 2280 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, 2281 args[1], SHIFT_IMM_LSR(args[3])); 2282 } 2283 break; 2284 2285 case INDEX_op_div_i32: 2286 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); 2287 break; 2288 case INDEX_op_divu_i32: 2289 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); 2290 break; 2291 2292 case INDEX_op_mb: 2293 tcg_out_mb(s, args[0]); 2294 break; 2295 2296 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 2297 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 2298 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 2299 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 2300 default: 2301 tcg_abort(); 2302 } 2303} 2304 2305static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2306{ 2307 switch (op) { 2308 case INDEX_op_goto_ptr: 2309 return C_O0_I1(r); 2310 2311 case INDEX_op_ld8u_i32: 2312 case INDEX_op_ld8s_i32: 2313 case INDEX_op_ld16u_i32: 2314 case INDEX_op_ld16s_i32: 2315 case INDEX_op_ld_i32: 2316 case INDEX_op_neg_i32: 2317 case INDEX_op_not_i32: 2318 case INDEX_op_bswap16_i32: 2319 case INDEX_op_bswap32_i32: 2320 case INDEX_op_ext8s_i32: 2321 case INDEX_op_ext16s_i32: 2322 case INDEX_op_ext16u_i32: 2323 case INDEX_op_extract_i32: 2324 case INDEX_op_sextract_i32: 2325 return C_O1_I1(r, r); 2326 2327 case INDEX_op_st8_i32: 2328 case INDEX_op_st16_i32: 2329 case INDEX_op_st_i32: 2330 return C_O0_I2(r, r); 2331 2332 case INDEX_op_add_i32: 2333 case INDEX_op_sub_i32: 2334 case INDEX_op_setcond_i32: 2335 return C_O1_I2(r, r, rIN); 2336 2337 case INDEX_op_and_i32: 2338 case INDEX_op_andc_i32: 2339 case INDEX_op_clz_i32: 2340 case INDEX_op_ctz_i32: 2341 return C_O1_I2(r, r, rIK); 2342 2343 case INDEX_op_mul_i32: 2344 case INDEX_op_div_i32: 2345 case INDEX_op_divu_i32: 2346 return C_O1_I2(r, r, r); 2347 2348 case INDEX_op_mulu2_i32: 2349 case INDEX_op_muls2_i32: 2350 return C_O2_I2(r, r, r, r); 2351 2352 case INDEX_op_or_i32: 2353 case INDEX_op_xor_i32: 2354 return C_O1_I2(r, r, rI); 2355 2356 case INDEX_op_shl_i32: 2357 case INDEX_op_shr_i32: 2358 case INDEX_op_sar_i32: 2359 case INDEX_op_rotl_i32: 2360 case INDEX_op_rotr_i32: 2361 return C_O1_I2(r, r, ri); 2362 2363 case INDEX_op_brcond_i32: 2364 return C_O0_I2(r, rIN); 2365 case INDEX_op_deposit_i32: 2366 return C_O1_I2(r, 0, rZ); 2367 case INDEX_op_extract2_i32: 2368 return C_O1_I2(r, rZ, rZ); 2369 case INDEX_op_movcond_i32: 2370 return C_O1_I4(r, r, rIN, rIK, 0); 2371 case INDEX_op_add2_i32: 2372 return C_O2_I4(r, r, r, r, rIN, rIK); 2373 case INDEX_op_sub2_i32: 2374 return C_O2_I4(r, r, rI, rI, rIN, rIK); 2375 case INDEX_op_brcond2_i32: 2376 return C_O0_I4(r, r, rI, rI); 2377 case INDEX_op_setcond2_i32: 2378 return C_O1_I4(r, r, r, rI, rI); 2379 2380 case INDEX_op_qemu_ld_i32: 2381 return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l); 2382 case INDEX_op_qemu_ld_i64: 2383 return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l); 2384 case INDEX_op_qemu_st_i32: 2385 return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s); 2386 case INDEX_op_qemu_st_i64: 2387 return TARGET_LONG_BITS == 32 ? C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s); 2388 2389 case INDEX_op_st_vec: 2390 return C_O0_I2(w, r); 2391 case INDEX_op_ld_vec: 2392 case INDEX_op_dupm_vec: 2393 return C_O1_I1(w, r); 2394 case INDEX_op_dup_vec: 2395 return C_O1_I1(w, wr); 2396 case INDEX_op_abs_vec: 2397 case INDEX_op_neg_vec: 2398 case INDEX_op_not_vec: 2399 case INDEX_op_shli_vec: 2400 case INDEX_op_shri_vec: 2401 case INDEX_op_sari_vec: 2402 return C_O1_I1(w, w); 2403 case INDEX_op_dup2_vec: 2404 case INDEX_op_add_vec: 2405 case INDEX_op_mul_vec: 2406 case INDEX_op_smax_vec: 2407 case INDEX_op_smin_vec: 2408 case INDEX_op_ssadd_vec: 2409 case INDEX_op_sssub_vec: 2410 case INDEX_op_sub_vec: 2411 case INDEX_op_umax_vec: 2412 case INDEX_op_umin_vec: 2413 case INDEX_op_usadd_vec: 2414 case INDEX_op_ussub_vec: 2415 case INDEX_op_xor_vec: 2416 case INDEX_op_arm_sshl_vec: 2417 case INDEX_op_arm_ushl_vec: 2418 return C_O1_I2(w, w, w); 2419 case INDEX_op_arm_sli_vec: 2420 return C_O1_I2(w, 0, w); 2421 case INDEX_op_or_vec: 2422 case INDEX_op_andc_vec: 2423 return C_O1_I2(w, w, wO); 2424 case INDEX_op_and_vec: 2425 case INDEX_op_orc_vec: 2426 return C_O1_I2(w, w, wV); 2427 case INDEX_op_cmp_vec: 2428 return C_O1_I2(w, w, wZ); 2429 case INDEX_op_bitsel_vec: 2430 return C_O1_I3(w, w, w, w); 2431 default: 2432 g_assert_not_reached(); 2433 } 2434} 2435 2436static void tcg_target_init(TCGContext *s) 2437{ 2438 /* 2439 * Only probe for the platform and capabilities if we haven't already 2440 * determined maximum values at compile time. 2441 */ 2442#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2443 { 2444 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2445#ifndef use_idiv_instructions 2446 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2447#endif 2448#ifndef use_neon_instructions 2449 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2450#endif 2451 } 2452#endif 2453 2454 if (__ARM_ARCH < 7) { 2455 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2456 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2457 arm_arch = pl[1] - '0'; 2458 } 2459 2460 if (arm_arch < 6) { 2461 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2462 exit(EXIT_FAILURE); 2463 } 2464 } 2465 2466 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2467 2468 tcg_target_call_clobber_regs = 0; 2469 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2470 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2471 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2472 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2473 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2474 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2475 2476 if (use_neon_instructions) { 2477 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2478 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2479 2480 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2481 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2482 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2483 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2484 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2485 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2486 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2487 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2488 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2489 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2490 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2491 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2492 } 2493 2494 s->reserved_regs = 0; 2495 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2496 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2497 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2498 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2499} 2500 2501static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2502 TCGReg arg1, intptr_t arg2) 2503{ 2504 switch (type) { 2505 case TCG_TYPE_I32: 2506 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2507 return; 2508 case TCG_TYPE_V64: 2509 /* regs 1; size 8; align 8 */ 2510 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2511 return; 2512 case TCG_TYPE_V128: 2513 /* 2514 * We have only 8-byte alignment for the stack per the ABI. 2515 * Rather than dynamically re-align the stack, it's easier 2516 * to simply not request alignment beyond that. So: 2517 * regs 2; size 8; align 8 2518 */ 2519 tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2); 2520 return; 2521 default: 2522 g_assert_not_reached(); 2523 } 2524} 2525 2526static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 2527 TCGReg arg1, intptr_t arg2) 2528{ 2529 switch (type) { 2530 case TCG_TYPE_I32: 2531 tcg_out_st32(s, COND_AL, arg, arg1, arg2); 2532 return; 2533 case TCG_TYPE_V64: 2534 /* regs 1; size 8; align 8 */ 2535 tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); 2536 return; 2537 case TCG_TYPE_V128: 2538 /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */ 2539 tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2); 2540 return; 2541 default: 2542 g_assert_not_reached(); 2543 } 2544} 2545 2546static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 2547 TCGReg base, intptr_t ofs) 2548{ 2549 return false; 2550} 2551 2552static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 2553{ 2554 if (ret == arg) { 2555 return true; 2556 } 2557 switch (type) { 2558 case TCG_TYPE_I32: 2559 if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) { 2560 tcg_out_mov_reg(s, COND_AL, ret, arg); 2561 return true; 2562 } 2563 return false; 2564 2565 case TCG_TYPE_V64: 2566 case TCG_TYPE_V128: 2567 /* "VMOV D,N" is an alias for "VORR D,N,N". */ 2568 tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg); 2569 return true; 2570 2571 default: 2572 g_assert_not_reached(); 2573 } 2574} 2575 2576static void tcg_out_movi(TCGContext *s, TCGType type, 2577 TCGReg ret, tcg_target_long arg) 2578{ 2579 tcg_debug_assert(type == TCG_TYPE_I32); 2580 tcg_debug_assert(ret < TCG_REG_Q0); 2581 tcg_out_movi32(s, COND_AL, ret, arg); 2582} 2583 2584/* Type is always V128, with I64 elements. */ 2585static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh) 2586{ 2587 /* Move high element into place first. */ 2588 /* VMOV Dd+1, Ds */ 2589 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh); 2590 /* Move low element into place; tcg_out_mov will check for nop. */ 2591 tcg_out_mov(s, TCG_TYPE_V64, rd, rl); 2592} 2593 2594static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 2595 TCGReg rd, TCGReg rs) 2596{ 2597 int q = type - TCG_TYPE_V64; 2598 2599 if (vece == MO_64) { 2600 if (type == TCG_TYPE_V128) { 2601 tcg_out_dup2_vec(s, rd, rs, rs); 2602 } else { 2603 tcg_out_mov(s, TCG_TYPE_V64, rd, rs); 2604 } 2605 } else if (rs < TCG_REG_Q0) { 2606 int b = (vece == MO_8); 2607 int e = (vece == MO_16); 2608 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) | 2609 encode_vn(rd) | (rs << 12)); 2610 } else { 2611 int imm4 = 1 << vece; 2612 tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) | 2613 encode_vd(rd) | encode_vm(rs)); 2614 } 2615 return true; 2616} 2617 2618static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 2619 TCGReg rd, TCGReg base, intptr_t offset) 2620{ 2621 if (vece == MO_64) { 2622 tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset); 2623 if (type == TCG_TYPE_V128) { 2624 tcg_out_dup2_vec(s, rd, rd, rd); 2625 } 2626 } else { 2627 int q = type - TCG_TYPE_V64; 2628 tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5), 2629 rd, base, offset); 2630 } 2631 return true; 2632} 2633 2634static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 2635 TCGReg rd, int64_t v64) 2636{ 2637 int q = type - TCG_TYPE_V64; 2638 int cmode, imm8, i; 2639 2640 /* Test all bytes equal first. */ 2641 if (vece == MO_8) { 2642 tcg_out_vmovi(s, rd, q, 0, 0xe, v64); 2643 return; 2644 } 2645 2646 /* 2647 * Test all bytes 0x00 or 0xff second. This can match cases that 2648 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. 2649 */ 2650 for (i = imm8 = 0; i < 8; i++) { 2651 uint8_t byte = v64 >> (i * 8); 2652 if (byte == 0xff) { 2653 imm8 |= 1 << i; 2654 } else if (byte != 0) { 2655 goto fail_bytes; 2656 } 2657 } 2658 tcg_out_vmovi(s, rd, q, 1, 0xe, imm8); 2659 return; 2660 fail_bytes: 2661 2662 /* 2663 * Tests for various replications. For each element width, if we 2664 * cannot find an expansion there's no point checking a larger 2665 * width because we already know by replication it cannot match. 2666 */ 2667 if (vece == MO_16) { 2668 uint16_t v16 = v64; 2669 2670 if (is_shimm16(v16, &cmode, &imm8)) { 2671 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2672 return; 2673 } 2674 if (is_shimm16(~v16, &cmode, &imm8)) { 2675 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2676 return; 2677 } 2678 2679 /* 2680 * Otherwise, all remaining constants can be loaded in two insns: 2681 * rd = v16 & 0xff, rd |= v16 & 0xff00. 2682 */ 2683 tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff); 2684 tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */ 2685 return; 2686 } 2687 2688 if (vece == MO_32) { 2689 uint32_t v32 = v64; 2690 2691 if (is_shimm32(v32, &cmode, &imm8) || 2692 is_soimm32(v32, &cmode, &imm8)) { 2693 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2694 return; 2695 } 2696 if (is_shimm32(~v32, &cmode, &imm8) || 2697 is_soimm32(~v32, &cmode, &imm8)) { 2698 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2699 return; 2700 } 2701 2702 /* 2703 * Restrict the set of constants to those we can load with 2704 * two instructions. Others we load from the pool. 2705 */ 2706 i = is_shimm32_pair(v32, &cmode, &imm8); 2707 if (i) { 2708 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2709 tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8)); 2710 return; 2711 } 2712 i = is_shimm32_pair(~v32, &cmode, &imm8); 2713 if (i) { 2714 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2715 tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8)); 2716 return; 2717 } 2718 } 2719 2720 /* 2721 * As a last resort, load from the constant pool. 2722 */ 2723 if (!q || vece == MO_64) { 2724 new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32); 2725 /* VLDR Dd, [pc + offset] */ 2726 tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16)); 2727 if (q) { 2728 tcg_out_dup2_vec(s, rd, rd, rd); 2729 } 2730 } else { 2731 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0); 2732 /* add tmp, pc, offset */ 2733 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0); 2734 tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0); 2735 } 2736} 2737 2738static const ARMInsn vec_cmp_insn[16] = { 2739 [TCG_COND_EQ] = INSN_VCEQ, 2740 [TCG_COND_GT] = INSN_VCGT, 2741 [TCG_COND_GE] = INSN_VCGE, 2742 [TCG_COND_GTU] = INSN_VCGT_U, 2743 [TCG_COND_GEU] = INSN_VCGE_U, 2744}; 2745 2746static const ARMInsn vec_cmp0_insn[16] = { 2747 [TCG_COND_EQ] = INSN_VCEQ0, 2748 [TCG_COND_GT] = INSN_VCGT0, 2749 [TCG_COND_GE] = INSN_VCGE0, 2750 [TCG_COND_LT] = INSN_VCLT0, 2751 [TCG_COND_LE] = INSN_VCLE0, 2752}; 2753 2754static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 2755 unsigned vecl, unsigned vece, 2756 const TCGArg args[TCG_MAX_OP_ARGS], 2757 const int const_args[TCG_MAX_OP_ARGS]) 2758{ 2759 TCGType type = vecl + TCG_TYPE_V64; 2760 unsigned q = vecl; 2761 TCGArg a0, a1, a2, a3; 2762 int cmode, imm8; 2763 2764 a0 = args[0]; 2765 a1 = args[1]; 2766 a2 = args[2]; 2767 2768 switch (opc) { 2769 case INDEX_op_ld_vec: 2770 tcg_out_ld(s, type, a0, a1, a2); 2771 return; 2772 case INDEX_op_st_vec: 2773 tcg_out_st(s, type, a0, a1, a2); 2774 return; 2775 case INDEX_op_dupm_vec: 2776 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2777 return; 2778 case INDEX_op_dup2_vec: 2779 tcg_out_dup2_vec(s, a0, a1, a2); 2780 return; 2781 case INDEX_op_abs_vec: 2782 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1); 2783 return; 2784 case INDEX_op_neg_vec: 2785 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1); 2786 return; 2787 case INDEX_op_not_vec: 2788 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1); 2789 return; 2790 case INDEX_op_add_vec: 2791 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); 2792 return; 2793 case INDEX_op_mul_vec: 2794 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); 2795 return; 2796 case INDEX_op_smax_vec: 2797 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2); 2798 return; 2799 case INDEX_op_smin_vec: 2800 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2); 2801 return; 2802 case INDEX_op_sub_vec: 2803 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); 2804 return; 2805 case INDEX_op_ssadd_vec: 2806 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2); 2807 return; 2808 case INDEX_op_sssub_vec: 2809 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); 2810 return; 2811 case INDEX_op_umax_vec: 2812 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2); 2813 return; 2814 case INDEX_op_umin_vec: 2815 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2); 2816 return; 2817 case INDEX_op_usadd_vec: 2818 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); 2819 return; 2820 case INDEX_op_ussub_vec: 2821 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2); 2822 return; 2823 case INDEX_op_xor_vec: 2824 tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); 2825 return; 2826 case INDEX_op_arm_sshl_vec: 2827 /* 2828 * Note that Vm is the data and Vn is the shift count, 2829 * therefore the arguments appear reversed. 2830 */ 2831 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1); 2832 return; 2833 case INDEX_op_arm_ushl_vec: 2834 /* See above. */ 2835 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1); 2836 return; 2837 case INDEX_op_shli_vec: 2838 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); 2839 return; 2840 case INDEX_op_shri_vec: 2841 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2); 2842 return; 2843 case INDEX_op_sari_vec: 2844 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); 2845 return; 2846 case INDEX_op_arm_sli_vec: 2847 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece)); 2848 return; 2849 2850 case INDEX_op_andc_vec: 2851 if (!const_args[2]) { 2852 tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2); 2853 return; 2854 } 2855 a2 = ~a2; 2856 /* fall through */ 2857 case INDEX_op_and_vec: 2858 if (const_args[2]) { 2859 is_shimm1632(~a2, &cmode, &imm8); 2860 if (a0 == a1) { 2861 tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */ 2862 return; 2863 } 2864 tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */ 2865 a2 = a0; 2866 } 2867 tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); 2868 return; 2869 2870 case INDEX_op_orc_vec: 2871 if (!const_args[2]) { 2872 tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2); 2873 return; 2874 } 2875 a2 = ~a2; 2876 /* fall through */ 2877 case INDEX_op_or_vec: 2878 if (const_args[2]) { 2879 is_shimm1632(a2, &cmode, &imm8); 2880 if (a0 == a1) { 2881 tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */ 2882 return; 2883 } 2884 tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */ 2885 a2 = a0; 2886 } 2887 tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2); 2888 return; 2889 2890 case INDEX_op_cmp_vec: 2891 { 2892 TCGCond cond = args[3]; 2893 2894 if (cond == TCG_COND_NE) { 2895 if (const_args[2]) { 2896 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1); 2897 } else { 2898 tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2); 2899 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 2900 } 2901 } else { 2902 ARMInsn insn; 2903 2904 if (const_args[2]) { 2905 insn = vec_cmp0_insn[cond]; 2906 if (insn) { 2907 tcg_out_vreg2(s, insn, q, vece, a0, a1); 2908 return; 2909 } 2910 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); 2911 a2 = TCG_VEC_TMP; 2912 } 2913 insn = vec_cmp_insn[cond]; 2914 if (insn == 0) { 2915 TCGArg t; 2916 t = a1, a1 = a2, a2 = t; 2917 cond = tcg_swap_cond(cond); 2918 insn = vec_cmp_insn[cond]; 2919 tcg_debug_assert(insn != 0); 2920 } 2921 tcg_out_vreg3(s, insn, q, vece, a0, a1, a2); 2922 } 2923 } 2924 return; 2925 2926 case INDEX_op_bitsel_vec: 2927 a3 = args[3]; 2928 if (a0 == a3) { 2929 tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1); 2930 } else if (a0 == a2) { 2931 tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1); 2932 } else { 2933 tcg_out_mov(s, type, a0, a1); 2934 tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3); 2935 } 2936 return; 2937 2938 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ 2939 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ 2940 default: 2941 g_assert_not_reached(); 2942 } 2943} 2944 2945int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 2946{ 2947 switch (opc) { 2948 case INDEX_op_add_vec: 2949 case INDEX_op_sub_vec: 2950 case INDEX_op_and_vec: 2951 case INDEX_op_andc_vec: 2952 case INDEX_op_or_vec: 2953 case INDEX_op_orc_vec: 2954 case INDEX_op_xor_vec: 2955 case INDEX_op_not_vec: 2956 case INDEX_op_shli_vec: 2957 case INDEX_op_shri_vec: 2958 case INDEX_op_sari_vec: 2959 case INDEX_op_ssadd_vec: 2960 case INDEX_op_sssub_vec: 2961 case INDEX_op_usadd_vec: 2962 case INDEX_op_ussub_vec: 2963 case INDEX_op_bitsel_vec: 2964 return 1; 2965 case INDEX_op_abs_vec: 2966 case INDEX_op_cmp_vec: 2967 case INDEX_op_mul_vec: 2968 case INDEX_op_neg_vec: 2969 case INDEX_op_smax_vec: 2970 case INDEX_op_smin_vec: 2971 case INDEX_op_umax_vec: 2972 case INDEX_op_umin_vec: 2973 return vece < MO_64; 2974 case INDEX_op_shlv_vec: 2975 case INDEX_op_shrv_vec: 2976 case INDEX_op_sarv_vec: 2977 case INDEX_op_rotli_vec: 2978 case INDEX_op_rotlv_vec: 2979 case INDEX_op_rotrv_vec: 2980 return -1; 2981 default: 2982 return 0; 2983 } 2984} 2985 2986void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2987 TCGArg a0, ...) 2988{ 2989 va_list va; 2990 TCGv_vec v0, v1, v2, t1, t2, c1; 2991 TCGArg a2; 2992 2993 va_start(va, a0); 2994 v0 = temp_tcgv_vec(arg_temp(a0)); 2995 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 2996 a2 = va_arg(va, TCGArg); 2997 va_end(va); 2998 2999 switch (opc) { 3000 case INDEX_op_shlv_vec: 3001 /* 3002 * Merely propagate shlv_vec to arm_ushl_vec. 3003 * In this way we don't set TCG_TARGET_HAS_shv_vec 3004 * because everything is done via expansion. 3005 */ 3006 v2 = temp_tcgv_vec(arg_temp(a2)); 3007 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 3008 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 3009 break; 3010 3011 case INDEX_op_shrv_vec: 3012 case INDEX_op_sarv_vec: 3013 /* Right shifts are negative left shifts for NEON. */ 3014 v2 = temp_tcgv_vec(arg_temp(a2)); 3015 t1 = tcg_temp_new_vec(type); 3016 tcg_gen_neg_vec(vece, t1, v2); 3017 if (opc == INDEX_op_shrv_vec) { 3018 opc = INDEX_op_arm_ushl_vec; 3019 } else { 3020 opc = INDEX_op_arm_sshl_vec; 3021 } 3022 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0), 3023 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3024 tcg_temp_free_vec(t1); 3025 break; 3026 3027 case INDEX_op_rotli_vec: 3028 t1 = tcg_temp_new_vec(type); 3029 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); 3030 vec_gen_4(INDEX_op_arm_sli_vec, type, vece, 3031 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); 3032 tcg_temp_free_vec(t1); 3033 break; 3034 3035 case INDEX_op_rotlv_vec: 3036 v2 = temp_tcgv_vec(arg_temp(a2)); 3037 t1 = tcg_temp_new_vec(type); 3038 c1 = tcg_constant_vec(type, vece, 8 << vece); 3039 tcg_gen_sub_vec(vece, t1, v2, c1); 3040 /* Right shifts are negative left shifts for NEON. */ 3041 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 3042 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3043 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 3044 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 3045 tcg_gen_or_vec(vece, v0, v0, t1); 3046 tcg_temp_free_vec(t1); 3047 break; 3048 3049 case INDEX_op_rotrv_vec: 3050 v2 = temp_tcgv_vec(arg_temp(a2)); 3051 t1 = tcg_temp_new_vec(type); 3052 t2 = tcg_temp_new_vec(type); 3053 c1 = tcg_constant_vec(type, vece, 8 << vece); 3054 tcg_gen_neg_vec(vece, t1, v2); 3055 tcg_gen_sub_vec(vece, t2, c1, v2); 3056 /* Right shifts are negative left shifts for NEON. */ 3057 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 3058 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3059 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2), 3060 tcgv_vec_arg(v1), tcgv_vec_arg(t2)); 3061 tcg_gen_or_vec(vece, v0, t1, t2); 3062 tcg_temp_free_vec(t1); 3063 tcg_temp_free_vec(t2); 3064 break; 3065 3066 default: 3067 g_assert_not_reached(); 3068 } 3069} 3070 3071static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 3072{ 3073 int i; 3074 for (i = 0; i < count; ++i) { 3075 p[i] = INSN_NOP; 3076 } 3077} 3078 3079/* Compute frame size via macros, to share between tcg_target_qemu_prologue 3080 and tcg_register_jit. */ 3081 3082#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long)) 3083 3084#define FRAME_SIZE \ 3085 ((PUSH_SIZE \ 3086 + TCG_STATIC_CALL_ARGS_SIZE \ 3087 + CPU_TEMP_BUF_NLONGS * sizeof(long) \ 3088 + TCG_TARGET_STACK_ALIGN - 1) \ 3089 & -TCG_TARGET_STACK_ALIGN) 3090 3091#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) 3092 3093static void tcg_target_qemu_prologue(TCGContext *s) 3094{ 3095 /* Calling convention requires us to save r4-r11 and lr. */ 3096 /* stmdb sp!, { r4 - r11, lr } */ 3097 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK, 3098 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3099 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3100 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14)); 3101 3102 /* Reserve callee argument and tcg temp space. */ 3103 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, 3104 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3105 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 3106 CPU_TEMP_BUF_NLONGS * sizeof(long)); 3107 3108 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 3109 3110#ifndef CONFIG_SOFTMMU 3111 if (guest_base) { 3112 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); 3113 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); 3114 } 3115#endif 3116 3117 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); 3118 3119 /* 3120 * Return path for goto_ptr. Set return value to 0, a-la exit_tb, 3121 * and fall through to the rest of the epilogue. 3122 */ 3123 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 3124 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); 3125 tcg_out_epilogue(s); 3126} 3127 3128static void tcg_out_epilogue(TCGContext *s) 3129{ 3130 /* Release local stack frame. */ 3131 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, 3132 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3133 3134 /* ldmia sp!, { r4 - r11, pc } */ 3135 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK, 3136 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3137 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3138 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC)); 3139} 3140 3141typedef struct { 3142 DebugFrameHeader h; 3143 uint8_t fde_def_cfa[4]; 3144 uint8_t fde_reg_ofs[18]; 3145} DebugFrame; 3146 3147#define ELF_HOST_MACHINE EM_ARM 3148 3149/* We're expecting a 2 byte uleb128 encoded value. */ 3150QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); 3151 3152static const DebugFrame debug_frame = { 3153 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 3154 .h.cie.id = -1, 3155 .h.cie.version = 1, 3156 .h.cie.code_align = 1, 3157 .h.cie.data_align = 0x7c, /* sleb128 -4 */ 3158 .h.cie.return_column = 14, 3159 3160 /* Total FDE size does not include the "len" member. */ 3161 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 3162 3163 .fde_def_cfa = { 3164 12, 13, /* DW_CFA_def_cfa sp, ... */ 3165 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 3166 (FRAME_SIZE >> 7) 3167 }, 3168 .fde_reg_ofs = { 3169 /* The following must match the stmdb in the prologue. */ 3170 0x8e, 1, /* DW_CFA_offset, lr, -4 */ 3171 0x8b, 2, /* DW_CFA_offset, r11, -8 */ 3172 0x8a, 3, /* DW_CFA_offset, r10, -12 */ 3173 0x89, 4, /* DW_CFA_offset, r9, -16 */ 3174 0x88, 5, /* DW_CFA_offset, r8, -20 */ 3175 0x87, 6, /* DW_CFA_offset, r7, -24 */ 3176 0x86, 7, /* DW_CFA_offset, r6, -28 */ 3177 0x85, 8, /* DW_CFA_offset, r5, -32 */ 3178 0x84, 9, /* DW_CFA_offset, r4, -36 */ 3179 } 3180}; 3181 3182void tcg_register_jit(const void *buf, size_t buf_size) 3183{ 3184 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 3185} 3186