/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped.  */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP  (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int rot = encode_imm(offset);

    if (rot >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7));
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only)
 * and r0-r1 doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
static int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
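
/*
 * Worked example (illustrative) for the cmode/imm8 checks above: a vector
 * whose 32-bit elements are 0x00ab0000 matches is_shimm32 with cmode 0x4,
 * imm8 0xab; 16-bit elements of 0xab00 match is_shimm16 with cmode 0xa,
 * imm8 0xab; and 0x0000abff matches the shifting-ones form in is_soimm32
 * with cmode 0xc, imm8 0xab.
 */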

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}
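
/*
 * For reference, the A32 data-processing word assembled by the two helpers
 * above is laid out as cond[31:28] | I[25] | opc[24:21] | S[20] (TO_CPSR) |
 * Rn[19:16] | Rd[15:12] | operand2[11:0], where operand2 is rotate << 8 |
 * imm8 when the I bit is set, or a shift descriptor plus Rm when it is clear.
 */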

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}
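
/*
 * Note on the (u, p, w) flags threaded through the helpers above: p selects
 * pre-indexed addressing, u selects adding rather than subtracting the
 * offset, and w requests base writeback.  The plain accessors pass
 * (1, 1, 0) for simple base+offset addressing, while the _rwb variants
 * pass (1, 1, 1) to pre-increment the base register.
 */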

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}
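
/*
 * tcg_out_movi32 below tries progressively more expensive strategies:
 * a single MOV/MVN immediate, a pc-relative ADD/SUB for addresses near
 * the code buffer, MOVW/MOVT on v7, a two-insn MOV/MVN plus EOR pair,
 * and finally a constant-pool load via tcg_out_movi_pool above.
 */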

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}

static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
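
/*
 * Illustration of the "rIK" fallback above: AND with 0xffffff00 is not an
 * encodable immediate, but its complement 0xff is, so the insn is emitted
 * as BIC rd, rn, #0xff.  The "rIN" variant below plays the same game with
 * negation, e.g. turning ADD rd, rn, #-4 into SUB rd, rn, #4.
 */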

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
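
/*
 * Example of the pre-v6 UNPREDICTABLE cases handled above: a request for
 * "mul r4, r4, r5" (rd == rn) is emitted as "mul r4, r5, r4" by swapping
 * the commutative operands; only when rd == rn == rm must an input first
 * be copied through TCG_REG_TMP.
 */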

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* Swap the two low bytes, assuming that the two high input bytes and the
   two high output bytes can hold any value.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
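
/*
 * The pre-v6 bswap32 sequence above is the classic four-insn idiom.
 * With input x = AABBCCDD: "eor tmp, x, x, ror #16" gives the byte-wise
 * values A^C, B^D, C^A, D^B; BIC clears the B^D byte; "mov rd, x, ror #8"
 * yields DDAABBCC; and the final "eor rd, rd, tmp, lsr #8" fixes the
 * remaining two bytes, producing DDCCBBAA.
 */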

static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                               int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
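
/*
 * The offset-checking helpers above share one pattern: offsets that fit the
 * immediate form (12 bits for word and unsigned-byte accesses, 8 bits for
 * halfword and signed-byte accesses) are encoded directly; anything larger
 * is first materialized in TCG_REG_TMP and issued through the
 * register-offset form.
 */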

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
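
/*
 * A concrete reading of tcg_out_cmp2 above for signed "(ah:al) < (bh:bl)":
 * CMP al, bl computes the low-half borrow, then SBC with TO_CPSR completes
 * a full double-word subtraction, so the resulting N and V flags are those
 * of a 64-bit compare and the GE/LT condition codes apply directly.
 */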

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by one while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

#ifdef CONFIG_SOFTMMU
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
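
/*
 * Sketch of the AAPCS marshalling performed by the routines below
 * (illustrative): for a 32-bit guest, a load helper call is r0 = env,
 * r1 = addr, r2 = oi, r3 = return address, with a 64-bit result in r0:r1.
 * For a 64-bit guest address, the pair is aligned to r2:r3, and oi plus
 * the return address spill to the reserved stack slots.
 */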

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers, 4 and up are on the stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}

#define TLB_SHIFT  (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /*
     * We don't support inline unaligned accesses, but we can easily
     * support overalignment checks.
     */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    if (use_armv6_instructions) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
    } else {
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
    }

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }
    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in no more than 3 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory, which means it
     * isn't worth checking for an immediate operand for BIC.
     */
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));

        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        addrlo, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_bits) {
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
                            (1 << a_bits) - 1);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}
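
/*
 * Roughly, for a 32-bit guest on ARMv7 with cmp_off == 0, the sequence
 * generated above is (offsets and mask value illustrative):
 *
 *    ldrd  r0, r1, [env, #-off]     @ {mask, table}
 *    and   r0, r0, addrlo, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *    ldr   r2, [r1, r0]!            @ comparator; r1 = &entry
 *    ldr   r1, [r1, #addend_off]
 *    movw  tmp, #mask               @ page bits plus alignment bits
 *    bic   tmp, addrlo, tmp
 *    cmp   r2, tmp                  @ EQ on TLB hit
 */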

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    void *func;

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
#endif /* SOFTMMU */

static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}
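
/*
 * In the MO_Q paths of the two expanders above and below, a byte-swapped
 * 64-bit load targets the registers crosswise (dl/dh swapped) so that each
 * 32-bit half lands where its byte-reversed value belongs; two bswap32s
 * then finish the job without an extra scratch register.
 */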

static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode
       into LR for the slow path.  We will not be using the value for a
       tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
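
/*
 * Linkage note for the softmmu paths: tcg_out_tlb_read leaves the flags
 * with EQ set on a TLB hit, so the BL emitted with COND_NE is taken only
 * on a miss and simultaneously deposits a return address in LR for the
 * slow path.  Loads branch out before the access (above); stores (below)
 * perform the access with COND_EQ and make the conditional call last, so
 * the slow path can return directly here.
 */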
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned. */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}

static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned. */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return
       here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static void tcg_out_epilogue(TCGContext *s);
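/*
 * Central opcode dispatch: emit host instructions for one TCG op.
 * The operand constraints returned by tcg_target_op_def below
 * guarantee the register/constant combinations each case relies on.
 */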
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg args[TCG_MAX_OP_ARGS],
                              const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
        tcg_out_epilogue(s);
        break;
    case INDEX_op_goto_tb:
        {
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
            dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
            dil = sextract32(dif, 0, 12);
            if (dif != dil) {
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   be no worse off.  */
                base = TCG_REG_R0;
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            }
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
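    /*
     * ARM's shift-immediate field cannot encode a shift of zero for
     * LSR/ASR/ROR (an immediate of 0 means shift-by-32, or RRX for ROR),
     * so a constant count of 0 must be emitted as LSL #0, a plain move.
     */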
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
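    /*
     * extract/sextract go through the tcg_out_extract/tcg_out_sextract
     * helpers defined earlier in this file (presumably the v6T2+
     * UBFX/SBFX bitfield extract instructions).
     */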
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic.  */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, r, rIN);

    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return C_O1_I2(r, r, rIK);

    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return C_O1_I2(r, r, r);

    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return C_O1_I2(r, r, rI);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
        return C_O0_I2(r, rIN);
    case INDEX_op_deposit_i32:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_extract2_i32:
        return C_O1_I2(r, rZ, rZ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, r, rIN, rIK, 0);
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rIN, rIK);
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rI, rIN, rIK);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, rI, rI);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, rI, rI);

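    /*
     * The 'l' and 's' constraints (presumably ALL_QLOAD_REGS and
     * ALL_QSTORE_REGS, defined earlier in this file) keep qemu_ld/st
     * operands clear of the registers used by the TLB lookup and the
     * slow-path call.
     */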
    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_dup2_vec:
    case INDEX_op_add_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_arm_sshl_vec:
    case INDEX_op_arm_ushl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_arm_sli_vec:
        return C_O1_I2(w, 0, w);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wV);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);
    default:
        g_assert_not_reached();
    }
}
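/*
 * Probe host capabilities at startup: idiv and NEON via AT_HWCAP
 * (unless fixed at compile time), the architecture level via
 * AT_PLATFORM, then declare the available, call-clobbered and
 * reserved register sets.
 */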
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
#ifndef use_idiv_instructions
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
#endif
#ifndef use_neon_instructions
        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
#endif
    }
#endif

    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    if (use_neon_instructions) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;

        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /* regs 2; size 8; align 16 */
        tcg_out_vldst(s, INSN_VLD1 | 0xae0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /* regs 2; size 8; align 16 */
        tcg_out_vldst(s, INSN_VST1 | 0xae0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}
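/*
 * Stores of constants directly to memory are not supported; returning
 * false tells the register allocator to load the constant into a
 * register and use a plain tcg_out_st instead.
 */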
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(type == TCG_TYPE_I32);
    tcg_debug_assert(ret < TCG_REG_Q0);
    tcg_out_movi32(s, COND_AL, ret, arg);
}

/* Type is always V128, with I64 elements.  */
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
{
    /* Move high element into place first. */
    /* VMOV Dd+1, Ds */
    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
    /* Move low element into place; tcg_out_mov will check for nop. */
    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int q = type - TCG_TYPE_V64;

    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
        int b = (vece == MO_8);
        int e = (vece == MO_16);
        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
                  encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg base, intptr_t offset)
{
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}
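/*
 * Materialize a vector constant: try a single VMOV/VMVN immediate,
 * then two-instruction immediate pairs, and fall back to a load from
 * the constant pool.
 */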
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;

    /* Test all bytes equal first.  */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }

    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }

    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}

static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};

static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};
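/*
 * Conditions missing from the tables above are zero entries: NE is
 * handled specially in tcg_out_vec_op (VTST against self, or VCEQ
 * plus VMVN), and LT/LE/LTU/LEU are obtained by swapping the operands
 * of GT/GE/GTU/GEU.
 */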
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        return;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        return;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        return;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        return;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        return;
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        return;
    case INDEX_op_arm_ushl_vec:
        /* See above.  */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        return;
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        return;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        return;

    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        return;

    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        return;

    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (cond == TCG_COND_NE) {
                if (const_args[2]) {
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
            } else {
                ARMInsn insn;

                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        return;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
            }
        }
        return;
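    /*
     * NEON bit-select comes in three flavours that differ only in which
     * operand aliases the destination: VBSL keeps the selector in Vd,
     * VBIT inserts Vn bits where the mask is set, VBIF where it is
     * clear.  Pick the variant matching the register the allocator
     * happened to reuse.
     */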
    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;

    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON.  */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotli_vec:
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;
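    /*
     * Variable rotates have no NEON encoding, so expand them as two
     * shifts: x rotl y == (x << y) | (x >> (width - y)), where the
     * right shift is a ushl by the negative count (y - width).
     */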
    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON.  */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON.  */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Reserve callee argument and tcg temp space.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr.  Set return value to 0, a la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}
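/*
 * Unwind information for the prologue above, registered with the
 * debugger's JIT interface so host debuggers can walk frames through
 * generated code.  The CFA offsets must match the stmdb in
 * tcg_target_qemu_prologue.
 */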
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}