1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Andrzej Zaborowski 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25#include "elf.h" 26#include "../tcg-ldst.c.inc" 27#include "../tcg-pool.c.inc" 28 29int arm_arch = __ARM_ARCH; 30 31#ifndef use_idiv_instructions 32bool use_idiv_instructions; 33#endif 34#ifndef use_neon_instructions 35bool use_neon_instructions; 36#endif 37 38#ifdef CONFIG_DEBUG_TCG 39static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 40 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 41 "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc", 42 "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7", 43 "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15", 44}; 45#endif 46 47static const int tcg_target_reg_alloc_order[] = { 48 TCG_REG_R4, 49 TCG_REG_R5, 50 TCG_REG_R6, 51 TCG_REG_R7, 52 TCG_REG_R8, 53 TCG_REG_R9, 54 TCG_REG_R10, 55 TCG_REG_R11, 56 TCG_REG_R13, 57 TCG_REG_R0, 58 TCG_REG_R1, 59 TCG_REG_R2, 60 TCG_REG_R3, 61 TCG_REG_R12, 62 TCG_REG_R14, 63 64 TCG_REG_Q0, 65 TCG_REG_Q1, 66 TCG_REG_Q2, 67 TCG_REG_Q3, 68 /* Q4 - Q7 are call-saved, and skipped. 
*/ 69 TCG_REG_Q8, 70 TCG_REG_Q9, 71 TCG_REG_Q10, 72 TCG_REG_Q11, 73 TCG_REG_Q12, 74 TCG_REG_Q13, 75 TCG_REG_Q14, 76 TCG_REG_Q15, 77}; 78 79static const int tcg_target_call_iarg_regs[4] = { 80 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 81}; 82 83static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 84{ 85 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 86 tcg_debug_assert(slot >= 0 && slot <= 3); 87 return TCG_REG_R0 + slot; 88} 89 90#define TCG_REG_TMP TCG_REG_R12 91#define TCG_VEC_TMP TCG_REG_Q15 92#ifndef CONFIG_SOFTMMU 93#define TCG_REG_GUEST_BASE TCG_REG_R11 94#endif 95 96typedef enum { 97 COND_EQ = 0x0, 98 COND_NE = 0x1, 99 COND_CS = 0x2, /* Unsigned greater or equal */ 100 COND_CC = 0x3, /* Unsigned less than */ 101 COND_MI = 0x4, /* Negative */ 102 COND_PL = 0x5, /* Zero or greater */ 103 COND_VS = 0x6, /* Overflow */ 104 COND_VC = 0x7, /* No overflow */ 105 COND_HI = 0x8, /* Unsigned greater than */ 106 COND_LS = 0x9, /* Unsigned less or equal */ 107 COND_GE = 0xa, 108 COND_LT = 0xb, 109 COND_GT = 0xc, 110 COND_LE = 0xd, 111 COND_AL = 0xe, 112} ARMCond; 113 114#define TO_CPSR (1 << 20) 115 116#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) 117#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) 118#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) 119#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) 120#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) 121#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) 122#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) 123#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) 124 125typedef enum { 126 ARITH_AND = 0x0 << 21, 127 ARITH_EOR = 0x1 << 21, 128 ARITH_SUB = 0x2 << 21, 129 ARITH_RSB = 0x3 << 21, 130 ARITH_ADD = 0x4 << 21, 131 ARITH_ADC = 0x5 << 21, 132 ARITH_SBC = 0x6 << 21, 133 ARITH_RSC = 0x7 << 21, 134 ARITH_TST = 0x8 << 21 | TO_CPSR, 135 ARITH_CMP = 0xa << 21 | TO_CPSR, 136 ARITH_CMN = 0xb << 21 | TO_CPSR, 137 ARITH_ORR = 0xc << 21, 138 ARITH_MOV = 0xd << 21, 139 ARITH_BIC = 0xe << 21, 140 ARITH_MVN = 0xf << 21, 141 142 INSN_B = 0x0a000000, 143 144 INSN_CLZ = 0x016f0f10, 145 INSN_RBIT = 0x06ff0f30, 146 147 INSN_LDMIA = 0x08b00000, 148 INSN_STMDB = 0x09200000, 149 150 INSN_LDR_IMM = 0x04100000, 151 INSN_LDR_REG = 0x06100000, 152 INSN_STR_IMM = 0x04000000, 153 INSN_STR_REG = 0x06000000, 154 155 INSN_LDRH_IMM = 0x005000b0, 156 INSN_LDRH_REG = 0x001000b0, 157 INSN_LDRSH_IMM = 0x005000f0, 158 INSN_LDRSH_REG = 0x001000f0, 159 INSN_STRH_IMM = 0x004000b0, 160 INSN_STRH_REG = 0x000000b0, 161 162 INSN_LDRB_IMM = 0x04500000, 163 INSN_LDRB_REG = 0x06500000, 164 INSN_LDRSB_IMM = 0x005000d0, 165 INSN_LDRSB_REG = 0x001000d0, 166 INSN_STRB_IMM = 0x04400000, 167 INSN_STRB_REG = 0x06400000, 168 169 INSN_LDRD_IMM = 0x004000d0, 170 INSN_LDRD_REG = 0x000000d0, 171 INSN_STRD_IMM = 0x004000f0, 172 INSN_STRD_REG = 0x000000f0, 173 174 INSN_DMB_ISH = 0xf57ff05b, 175 INSN_DMB_MCR = 0xee070fba, 176 177 /* Architected nop introduced in v6k. */ 178 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this 179 also Just So Happened to do nothing on pre-v6k so that we 180 don't need to conditionalize it? 
*/ 181 INSN_NOP_v6k = 0xe320f000, 182 /* Otherwise the assembler uses mov r0,r0 */ 183 INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, 184 185 INSN_VADD = 0xf2000800, 186 INSN_VAND = 0xf2000110, 187 INSN_VBIC = 0xf2100110, 188 INSN_VEOR = 0xf3000110, 189 INSN_VORN = 0xf2300110, 190 INSN_VORR = 0xf2200110, 191 INSN_VSUB = 0xf3000800, 192 INSN_VMUL = 0xf2000910, 193 INSN_VQADD = 0xf2000010, 194 INSN_VQADD_U = 0xf3000010, 195 INSN_VQSUB = 0xf2000210, 196 INSN_VQSUB_U = 0xf3000210, 197 INSN_VMAX = 0xf2000600, 198 INSN_VMAX_U = 0xf3000600, 199 INSN_VMIN = 0xf2000610, 200 INSN_VMIN_U = 0xf3000610, 201 202 INSN_VABS = 0xf3b10300, 203 INSN_VMVN = 0xf3b00580, 204 INSN_VNEG = 0xf3b10380, 205 206 INSN_VCEQ0 = 0xf3b10100, 207 INSN_VCGT0 = 0xf3b10000, 208 INSN_VCGE0 = 0xf3b10080, 209 INSN_VCLE0 = 0xf3b10180, 210 INSN_VCLT0 = 0xf3b10200, 211 212 INSN_VCEQ = 0xf3000810, 213 INSN_VCGE = 0xf2000310, 214 INSN_VCGT = 0xf2000300, 215 INSN_VCGE_U = 0xf3000310, 216 INSN_VCGT_U = 0xf3000300, 217 218 INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ 219 INSN_VSARI = 0xf2800010, /* VSHR.S */ 220 INSN_VSHRI = 0xf3800010, /* VSHR.U */ 221 INSN_VSLI = 0xf3800510, 222 INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ 223 INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ 224 225 INSN_VBSL = 0xf3100110, 226 INSN_VBIT = 0xf3200110, 227 INSN_VBIF = 0xf3300110, 228 229 INSN_VTST = 0xf2000810, 230 231 INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ 232 INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ 233 INSN_VLDR_D = 0xed100b00, /* VLDR.64 */ 234 INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ 235 INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */ 236 INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ 237 INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */ 238} ARMInsn; 239 240#define INSN_NOP (use_armv7_instructions ? 
INSN_NOP_v6k : INSN_NOP_v4) 241 242static const uint8_t tcg_cond_to_arm_cond[] = { 243 [TCG_COND_EQ] = COND_EQ, 244 [TCG_COND_NE] = COND_NE, 245 [TCG_COND_LT] = COND_LT, 246 [TCG_COND_GE] = COND_GE, 247 [TCG_COND_LE] = COND_LE, 248 [TCG_COND_GT] = COND_GT, 249 /* unsigned */ 250 [TCG_COND_LTU] = COND_CC, 251 [TCG_COND_GEU] = COND_CS, 252 [TCG_COND_LEU] = COND_LS, 253 [TCG_COND_GTU] = COND_HI, 254}; 255 256static int encode_imm(uint32_t imm); 257 258/* TCG private relocation type: add with pc+imm8 */ 259#define R_ARM_PC8 11 260 261/* TCG private relocation type: vldr with imm8 << 2 */ 262#define R_ARM_PC11 12 263 264static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 265{ 266 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 267 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2; 268 269 if (offset == sextract32(offset, 0, 24)) { 270 *src_rw = deposit32(*src_rw, 0, 24, offset); 271 return true; 272 } 273 return false; 274} 275 276static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 277{ 278 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 279 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 280 281 if (offset >= -0xfff && offset <= 0xfff) { 282 tcg_insn_unit insn = *src_rw; 283 bool u = (offset >= 0); 284 if (!u) { 285 offset = -offset; 286 } 287 insn = deposit32(insn, 23, 1, u); 288 insn = deposit32(insn, 0, 12, offset); 289 *src_rw = insn; 290 return true; 291 } 292 return false; 293} 294 295static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 296{ 297 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 298 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4; 299 300 if (offset >= -0xff && offset <= 0xff) { 301 tcg_insn_unit insn = *src_rw; 302 bool u = (offset >= 0); 303 if (!u) { 304 offset = -offset; 305 } 306 insn = deposit32(insn, 23, 1, u); 307 insn = deposit32(insn, 0, 8, offset); 308 *src_rw = insn; 309 return true; 310 } 311 return false; 312} 313 314static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 315{ 316 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 317 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 318 int imm12 = encode_imm(offset); 319 320 if (imm12 >= 0) { 321 *src_rw = deposit32(*src_rw, 0, 12, imm12); 322 return true; 323 } 324 return false; 325} 326 327static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 328 intptr_t value, intptr_t addend) 329{ 330 tcg_debug_assert(addend == 0); 331 switch (type) { 332 case R_ARM_PC24: 333 return reloc_pc24(code_ptr, (const tcg_insn_unit *)value); 334 case R_ARM_PC13: 335 return reloc_pc13(code_ptr, (const tcg_insn_unit *)value); 336 case R_ARM_PC11: 337 return reloc_pc11(code_ptr, (const tcg_insn_unit *)value); 338 case R_ARM_PC8: 339 return reloc_pc8(code_ptr, (const tcg_insn_unit *)value); 340 default: 341 g_assert_not_reached(); 342 } 343} 344 345#define TCG_CT_CONST_ARM 0x100 346#define TCG_CT_CONST_INV 0x200 347#define TCG_CT_CONST_NEG 0x400 348#define TCG_CT_CONST_ZERO 0x800 349#define TCG_CT_CONST_ORRI 0x1000 350#define TCG_CT_CONST_ANDI 0x2000 351 352#define ALL_GENERAL_REGS 0xffffu 353#define ALL_VECTOR_REGS 0xffff0000u 354 355/* 356 * r0-r2 will be overwritten when reading the tlb entry (softmmu only) 357 * and r0-r1 doing the byte swapping, so don't use these. 358 * r3 is removed for softmmu to avoid clashes with helper arguments. 
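 * r14 is excluded as well: the softmmu fast path reaches the slow path with
 * a conditional BL, which overwrites it.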
359 */ 360#ifdef CONFIG_SOFTMMU 361#define ALL_QLOAD_REGS \ 362 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ 363 (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \ 364 (1 << TCG_REG_R14))) 365#define ALL_QSTORE_REGS \ 366 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ 367 (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \ 368 ((TARGET_LONG_BITS == 64) << TCG_REG_R3))) 369#else 370#define ALL_QLOAD_REGS ALL_GENERAL_REGS 371#define ALL_QSTORE_REGS \ 372 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1))) 373#endif 374 375/* 376 * ARM immediates for ALU instructions are made of an unsigned 8-bit 377 * right-rotated by an even amount between 0 and 30. 378 * 379 * Return < 0 if @imm cannot be encoded, else the entire imm12 field. 380 */ 381static int encode_imm(uint32_t imm) 382{ 383 uint32_t rot, imm8; 384 385 /* Simple case, no rotation required. */ 386 if ((imm & ~0xff) == 0) { 387 return imm; 388 } 389 390 /* Next, try a simple even shift. */ 391 rot = ctz32(imm) & ~1; 392 imm8 = imm >> rot; 393 rot = 32 - rot; 394 if ((imm8 & ~0xff) == 0) { 395 goto found; 396 } 397 398 /* 399 * Finally, try harder with rotations. 400 * The ctz test above will have taken care of rotates >= 8. 401 */ 402 for (rot = 2; rot < 8; rot += 2) { 403 imm8 = rol32(imm, rot); 404 if ((imm8 & ~0xff) == 0) { 405 goto found; 406 } 407 } 408 /* Fail: imm cannot be encoded. */ 409 return -1; 410 411 found: 412 /* Note that rot is even, and we discard bit 0 by shifting by 7. */ 413 return rot << 7 | imm8; 414} 415 416static int encode_imm_nofail(uint32_t imm) 417{ 418 int ret = encode_imm(imm); 419 tcg_debug_assert(ret >= 0); 420 return ret; 421} 422 423static bool check_fit_imm(uint32_t imm) 424{ 425 return encode_imm(imm) >= 0; 426} 427 428/* Return true if v16 is a valid 16-bit shifted immediate. */ 429static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) 430{ 431 if (v16 == (v16 & 0xff)) { 432 *cmode = 0x8; 433 *imm8 = v16 & 0xff; 434 return true; 435 } else if (v16 == (v16 & 0xff00)) { 436 *cmode = 0xa; 437 *imm8 = v16 >> 8; 438 return true; 439 } 440 return false; 441} 442 443/* Return true if v32 is a valid 32-bit shifted immediate. */ 444static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) 445{ 446 if (v32 == (v32 & 0xff)) { 447 *cmode = 0x0; 448 *imm8 = v32 & 0xff; 449 return true; 450 } else if (v32 == (v32 & 0xff00)) { 451 *cmode = 0x2; 452 *imm8 = (v32 >> 8) & 0xff; 453 return true; 454 } else if (v32 == (v32 & 0xff0000)) { 455 *cmode = 0x4; 456 *imm8 = (v32 >> 16) & 0xff; 457 return true; 458 } else if (v32 == (v32 & 0xff000000)) { 459 *cmode = 0x6; 460 *imm8 = v32 >> 24; 461 return true; 462 } 463 return false; 464} 465 466/* Return true if v32 is a valid 32-bit shifting ones immediate. */ 467static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) 468{ 469 if ((v32 & 0xffff00ff) == 0xff) { 470 *cmode = 0xc; 471 *imm8 = (v32 >> 8) & 0xff; 472 return true; 473 } else if ((v32 & 0xff00ffff) == 0xffff) { 474 *cmode = 0xd; 475 *imm8 = (v32 >> 16) & 0xff; 476 return true; 477 } 478 return false; 479} 480 481/* 482 * Return non-zero if v32 can be formed by MOVI+ORR. 483 * Place the parameters for MOVI in (cmode, imm8). 484 * Return the cmode for ORR; the imm8 can be had via extraction from v32. 485 */ 486static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) 487{ 488 int i; 489 490 for (i = 6; i > 0; i -= 2) { 491 /* Mask out one byte we can add with ORR. 
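         * For example (illustrative value): with v32 = 0x00ff00ff, masking
         * the byte at bits 16..23 leaves 0x000000ff, which MOVI can encode
         * with cmode 0; the caller then ORRs that byte back in, using the
         * returned i (here 4) as the cmode for the ORR.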
*/ 492 uint32_t tmp = v32 & ~(0xffu << (i * 4)); 493 if (is_shimm32(tmp, cmode, imm8) || 494 is_soimm32(tmp, cmode, imm8)) { 495 break; 496 } 497 } 498 return i; 499} 500 501/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ 502static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) 503{ 504 if (v32 == deposit32(v32, 16, 16, v32)) { 505 return is_shimm16(v32, cmode, imm8); 506 } else { 507 return is_shimm32(v32, cmode, imm8); 508 } 509} 510 511/* Test if a constant matches the constraint. 512 * TODO: define constraints for: 513 * 514 * ldr/str offset: between -0xfff and 0xfff 515 * ldrh/strh offset: between -0xff and 0xff 516 * mov operand2: values represented with x << (2 * y), x < 0x100 517 * add, sub, eor...: ditto 518 */ 519static bool tcg_target_const_match(int64_t val, TCGType type, int ct) 520{ 521 if (ct & TCG_CT_CONST) { 522 return 1; 523 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { 524 return 1; 525 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) { 526 return 1; 527 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) { 528 return 1; 529 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 530 return 1; 531 } 532 533 switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { 534 case 0: 535 break; 536 case TCG_CT_CONST_ANDI: 537 val = ~val; 538 /* fallthru */ 539 case TCG_CT_CONST_ORRI: 540 if (val == deposit64(val, 32, 32, val)) { 541 int cmode, imm8; 542 return is_shimm1632(val, &cmode, &imm8); 543 } 544 break; 545 default: 546 /* Both bits should not be set for the same insn. */ 547 g_assert_not_reached(); 548 } 549 550 return 0; 551} 552 553static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset) 554{ 555 tcg_out32(s, (cond << 28) | INSN_B | 556 (((offset - 8) >> 2) & 0x00ffffff)); 557} 558 559static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset) 560{ 561 tcg_out32(s, (cond << 28) | 0x0b000000 | 562 (((offset - 8) >> 2) & 0x00ffffff)); 563} 564 565static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 566{ 567 tcg_out32(s, (cond << 28) | 0x012fff30 | rn); 568} 569 570static void tcg_out_blx_imm(TCGContext *s, int32_t offset) 571{ 572 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | 573 (((offset - 8) >> 2) & 0x00ffffff)); 574} 575 576static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc, 577 TCGReg rd, TCGReg rn, TCGReg rm, int shift) 578{ 579 tcg_out32(s, (cond << 28) | (0 << 25) | opc | 580 (rn << 16) | (rd << 12) | shift | rm); 581} 582 583static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm) 584{ 585 /* Simple reg-reg move, optimising out the 'do nothing' case */ 586 if (rd != rm) { 587 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0)); 588 } 589} 590 591static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 592{ 593 tcg_out32(s, (cond << 28) | 0x012fff10 | rn); 594} 595 596static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) 597{ 598 /* 599 * Unless the C portion of QEMU is compiled as thumb, we don't need 600 * true BX semantics; merely a branch to an address held in a register. 
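 * (BX with bit 0 of the target clear simply branches and stays in ARM
 * state, so using it here costs nothing.)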
601 */ 602 tcg_out_bx_reg(s, cond, rn); 603} 604 605static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, 606 TCGReg rd, TCGReg rn, int im) 607{ 608 tcg_out32(s, (cond << 28) | (1 << 25) | opc | 609 (rn << 16) | (rd << 12) | im); 610} 611 612static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, 613 TCGReg rn, uint16_t mask) 614{ 615 tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); 616} 617 618/* Note that this routine is used for both LDR and LDRH formats, so we do 619 not wish to include an immediate shift at this point. */ 620static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 621 TCGReg rn, TCGReg rm, bool u, bool p, bool w) 622{ 623 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) 624 | (w << 21) | (rn << 16) | (rt << 12) | rm); 625} 626 627static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 628 TCGReg rn, int imm8, bool p, bool w) 629{ 630 bool u = 1; 631 if (imm8 < 0) { 632 imm8 = -imm8; 633 u = 0; 634 } 635 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 636 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); 637} 638 639static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, 640 TCGReg rt, TCGReg rn, int imm12, bool p, bool w) 641{ 642 bool u = 1; 643 if (imm12 < 0) { 644 imm12 = -imm12; 645 u = 0; 646 } 647 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 648 (rn << 16) | (rt << 12) | imm12); 649} 650 651static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, 652 TCGReg rn, int imm12) 653{ 654 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); 655} 656 657static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, 658 TCGReg rn, int imm12) 659{ 660 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); 661} 662 663static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, 664 TCGReg rn, TCGReg rm) 665{ 666 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); 667} 668 669static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, 670 TCGReg rn, TCGReg rm) 671{ 672 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); 673} 674 675static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, 676 TCGReg rn, int imm8) 677{ 678 tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); 679} 680 681static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, 682 TCGReg rn, TCGReg rm) 683{ 684 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); 685} 686 687static void __attribute__((unused)) 688tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) 689{ 690 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); 691} 692 693static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, 694 TCGReg rn, int imm8) 695{ 696 tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); 697} 698 699static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, 700 TCGReg rn, TCGReg rm) 701{ 702 tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); 703} 704 705/* Register pre-increment with base writeback. 
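   These write-back forms let a single insn both compute base+index and use
   it; the softmmu TLB lookup and the 64-bit qemu_ld/st paths below rely on
   that.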
*/ 706static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 707 TCGReg rn, TCGReg rm) 708{ 709 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); 710} 711 712static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 713 TCGReg rn, TCGReg rm) 714{ 715 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); 716} 717 718static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt, 719 TCGReg rn, int imm8) 720{ 721 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); 722} 723 724static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt, 725 TCGReg rn, int imm8) 726{ 727 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); 728} 729 730static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt, 731 TCGReg rn, TCGReg rm) 732{ 733 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); 734} 735 736static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt, 737 TCGReg rn, TCGReg rm) 738{ 739 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); 740} 741 742static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt, 743 TCGReg rn, int imm8) 744{ 745 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); 746} 747 748static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt, 749 TCGReg rn, TCGReg rm) 750{ 751 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); 752} 753 754static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt, 755 TCGReg rn, int imm12) 756{ 757 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); 758} 759 760static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt, 761 TCGReg rn, int imm12) 762{ 763 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); 764} 765 766static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt, 767 TCGReg rn, TCGReg rm) 768{ 769 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); 770} 771 772static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt, 773 TCGReg rn, TCGReg rm) 774{ 775 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); 776} 777 778static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt, 779 TCGReg rn, int imm8) 780{ 781 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); 782} 783 784static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt, 785 TCGReg rn, TCGReg rm) 786{ 787 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); 788} 789 790static void tcg_out_movi_pool(TCGContext *s, ARMCond cond, 791 TCGReg rd, uint32_t arg) 792{ 793 new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); 794 tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); 795} 796 797static void tcg_out_movi32(TCGContext *s, ARMCond cond, 798 TCGReg rd, uint32_t arg) 799{ 800 int imm12, diff, opc, sh1, sh2; 801 uint32_t tt0, tt1, tt2; 802 803 /* Check a single MOV/MVN before anything else. */ 804 imm12 = encode_imm(arg); 805 if (imm12 >= 0) { 806 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12); 807 return; 808 } 809 imm12 = encode_imm(~arg); 810 if (imm12 >= 0) { 811 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12); 812 return; 813 } 814 815 /* Check for a pc-relative address. This will usually be the TB, 816 or within the TB, which is immediately before the code block. 
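     * (A read of PC in ARM state yields the address of the current insn
     * plus 8, hence the -8 adjustment here and in the relocation helpers.)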
*/ 817 diff = tcg_pcrel_diff(s, (void *)arg) - 8; 818 if (diff >= 0) { 819 imm12 = encode_imm(diff); 820 if (imm12 >= 0) { 821 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12); 822 return; 823 } 824 } else { 825 imm12 = encode_imm(-diff); 826 if (imm12 >= 0) { 827 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12); 828 return; 829 } 830 } 831 832 /* Use movw + movt. */ 833 if (use_armv7_instructions) { 834 /* movw */ 835 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12) 836 | ((arg << 4) & 0x000f0000) | (arg & 0xfff)); 837 if (arg & 0xffff0000) { 838 /* movt */ 839 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12) 840 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff)); 841 } 842 return; 843 } 844 845 /* Look for sequences of two insns. If we have lots of 1's, we can 846 shorten the sequence by beginning with mvn and then clearing 847 higher bits with eor. */ 848 tt0 = arg; 849 opc = ARITH_MOV; 850 if (ctpop32(arg) > 16) { 851 tt0 = ~arg; 852 opc = ARITH_MVN; 853 } 854 sh1 = ctz32(tt0) & ~1; 855 tt1 = tt0 & ~(0xff << sh1); 856 sh2 = ctz32(tt1) & ~1; 857 tt2 = tt1 & ~(0xff << sh2); 858 if (tt2 == 0) { 859 int rot; 860 861 rot = ((32 - sh1) << 7) & 0xf00; 862 tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); 863 rot = ((32 - sh2) << 7) & 0xf00; 864 tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, 865 ((tt0 >> sh2) & 0xff) | rot); 866 return; 867 } 868 869 /* Otherwise, drop it into the constant pool. */ 870 tcg_out_movi_pool(s, cond, rd, arg); 871} 872 873/* 874 * Emit either the reg,imm or reg,reg form of a data-processing insn. 875 * rhs must satisfy the "rI" constraint. 876 */ 877static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, 878 TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const) 879{ 880 if (rhs_is_const) { 881 tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs)); 882 } else { 883 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 884 } 885} 886 887/* 888 * Emit either the reg,imm or reg,reg form of a data-processing insn. 889 * rhs must satisfy the "rIK" constraint. 890 */ 891static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, 892 ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, 893 bool rhs_is_const) 894{ 895 if (rhs_is_const) { 896 int imm12 = encode_imm(rhs); 897 if (imm12 < 0) { 898 imm12 = encode_imm_nofail(~rhs); 899 opc = opinv; 900 } 901 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 902 } else { 903 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 904 } 905} 906 907static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, 908 ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, 909 bool rhs_is_const) 910{ 911 /* Emit either the reg,imm or reg,reg form of a data-processing insn. 912 * rhs must satisfy the "rIN" constraint. 
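     * ("rIN": a register, or an immediate that either encodes directly or
     * whose negation encodes; in the latter case @opneg is used with -rhs,
     * e.g. ADD <-> SUB.)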
913 */ 914 if (rhs_is_const) { 915 int imm12 = encode_imm(rhs); 916 if (imm12 < 0) { 917 imm12 = encode_imm_nofail(-rhs); 918 opc = opneg; 919 } 920 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 921 } else { 922 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 923 } 924} 925 926static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, 927 TCGReg rn, TCGReg rm) 928{ 929 /* mul */ 930 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); 931} 932 933static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, 934 TCGReg rd1, TCGReg rn, TCGReg rm) 935{ 936 /* umull */ 937 tcg_out32(s, (cond << 28) | 0x00800090 | 938 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 939} 940 941static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, 942 TCGReg rd1, TCGReg rn, TCGReg rm) 943{ 944 /* smull */ 945 tcg_out32(s, (cond << 28) | 0x00c00090 | 946 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 947} 948 949static void tcg_out_sdiv(TCGContext *s, ARMCond cond, 950 TCGReg rd, TCGReg rn, TCGReg rm) 951{ 952 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 953} 954 955static void tcg_out_udiv(TCGContext *s, ARMCond cond, 956 TCGReg rd, TCGReg rn, TCGReg rm) 957{ 958 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 959} 960 961static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 962{ 963 /* sxtb */ 964 tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn); 965} 966 967static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) 968{ 969 tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff); 970} 971 972static void __attribute__((unused)) 973tcg_out_ext8u_cond(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 974{ 975 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff); 976} 977 978static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 979{ 980 /* sxth */ 981 tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn); 982} 983 984static void tcg_out_ext16u_cond(TCGContext *s, ARMCond cond, 985 TCGReg rd, TCGReg rn) 986{ 987 /* uxth */ 988 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); 989} 990 991static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) 992{ 993 tcg_out_ext16u_cond(s, COND_AL, rd, rn); 994} 995 996static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) 997{ 998 g_assert_not_reached(); 999} 1000 1001static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) 1002{ 1003 g_assert_not_reached(); 1004} 1005 1006static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 1007{ 1008 g_assert_not_reached(); 1009} 1010 1011static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 1012{ 1013 g_assert_not_reached(); 1014} 1015 1016static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) 1017{ 1018 g_assert_not_reached(); 1019} 1020 1021static void tcg_out_bswap16(TCGContext *s, ARMCond cond, 1022 TCGReg rd, TCGReg rn, int flags) 1023{ 1024 if (flags & TCG_BSWAP_OS) { 1025 /* revsh */ 1026 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); 1027 return; 1028 } 1029 1030 /* rev16 */ 1031 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); 1032 if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1033 /* uxth */ 1034 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); 1035 } 1036} 1037 1038static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 1039{ 1040 /* rev */ 1041 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); 1042} 1043 
1044static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, 1045 TCGArg a1, int ofs, int len, bool const_a1) 1046{ 1047 if (const_a1) { 1048 /* bfi becomes bfc with rn == 15. */ 1049 a1 = 15; 1050 } 1051 /* bfi/bfc */ 1052 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1 1053 | (ofs << 7) | ((ofs + len - 1) << 16)); 1054} 1055 1056static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd, 1057 TCGReg rn, int ofs, int len) 1058{ 1059 /* ubfx */ 1060 tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn 1061 | (ofs << 7) | ((len - 1) << 16)); 1062} 1063 1064static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd, 1065 TCGReg rn, int ofs, int len) 1066{ 1067 /* sbfx */ 1068 tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn 1069 | (ofs << 7) | ((len - 1) << 16)); 1070} 1071 1072static void tcg_out_ld32u(TCGContext *s, ARMCond cond, 1073 TCGReg rd, TCGReg rn, int32_t offset) 1074{ 1075 if (offset > 0xfff || offset < -0xfff) { 1076 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1077 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP); 1078 } else 1079 tcg_out_ld32_12(s, cond, rd, rn, offset); 1080} 1081 1082static void tcg_out_st32(TCGContext *s, ARMCond cond, 1083 TCGReg rd, TCGReg rn, int32_t offset) 1084{ 1085 if (offset > 0xfff || offset < -0xfff) { 1086 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1087 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP); 1088 } else 1089 tcg_out_st32_12(s, cond, rd, rn, offset); 1090} 1091 1092static void tcg_out_ld16u(TCGContext *s, ARMCond cond, 1093 TCGReg rd, TCGReg rn, int32_t offset) 1094{ 1095 if (offset > 0xff || offset < -0xff) { 1096 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1097 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); 1098 } else 1099 tcg_out_ld16u_8(s, cond, rd, rn, offset); 1100} 1101 1102static void tcg_out_ld16s(TCGContext *s, ARMCond cond, 1103 TCGReg rd, TCGReg rn, int32_t offset) 1104{ 1105 if (offset > 0xff || offset < -0xff) { 1106 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1107 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); 1108 } else 1109 tcg_out_ld16s_8(s, cond, rd, rn, offset); 1110} 1111 1112static void tcg_out_st16(TCGContext *s, ARMCond cond, 1113 TCGReg rd, TCGReg rn, int32_t offset) 1114{ 1115 if (offset > 0xff || offset < -0xff) { 1116 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1117 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); 1118 } else 1119 tcg_out_st16_8(s, cond, rd, rn, offset); 1120} 1121 1122static void tcg_out_ld8u(TCGContext *s, ARMCond cond, 1123 TCGReg rd, TCGReg rn, int32_t offset) 1124{ 1125 if (offset > 0xfff || offset < -0xfff) { 1126 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1127 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); 1128 } else 1129 tcg_out_ld8_12(s, cond, rd, rn, offset); 1130} 1131 1132static void tcg_out_ld8s(TCGContext *s, ARMCond cond, 1133 TCGReg rd, TCGReg rn, int32_t offset) 1134{ 1135 if (offset > 0xff || offset < -0xff) { 1136 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1137 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); 1138 } else 1139 tcg_out_ld8s_8(s, cond, rd, rn, offset); 1140} 1141 1142static void tcg_out_st8(TCGContext *s, ARMCond cond, 1143 TCGReg rd, TCGReg rn, int32_t offset) 1144{ 1145 if (offset > 0xfff || offset < -0xfff) { 1146 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1147 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); 1148 } else 1149 tcg_out_st8_12(s, cond, rd, rn, offset); 1150} 1151 1152/* 1153 * The _goto case is normally between TBs within the same code buffer, and 1154 * with the code buffer limited to 16MB we 
wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.
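           RSB/RSC compute (b - a), i.e. the comparison with the operands
           swapped, and tcg_swap_cond() adjusts LE/GT to match.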
*/ 1259 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, 1260 TCG_REG_TMP, al, bl, const_bl); 1261 tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, 1262 TCG_REG_TMP, ah, bh, const_bh); 1263 return tcg_swap_cond(cond); 1264 1265 default: 1266 g_assert_not_reached(); 1267 } 1268} 1269 1270/* 1271 * Note that TCGReg references Q-registers. 1272 * Q-regno = 2 * D-regno, so shift left by 1 whlie inserting. 1273 */ 1274static uint32_t encode_vd(TCGReg rd) 1275{ 1276 tcg_debug_assert(rd >= TCG_REG_Q0); 1277 return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); 1278} 1279 1280static uint32_t encode_vn(TCGReg rn) 1281{ 1282 tcg_debug_assert(rn >= TCG_REG_Q0); 1283 return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17); 1284} 1285 1286static uint32_t encode_vm(TCGReg rm) 1287{ 1288 tcg_debug_assert(rm >= TCG_REG_Q0); 1289 return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); 1290} 1291 1292static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece, 1293 TCGReg d, TCGReg m) 1294{ 1295 tcg_out32(s, insn | (vece << 18) | (q << 6) | 1296 encode_vd(d) | encode_vm(m)); 1297} 1298 1299static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, 1300 TCGReg d, TCGReg n, TCGReg m) 1301{ 1302 tcg_out32(s, insn | (vece << 20) | (q << 6) | 1303 encode_vd(d) | encode_vn(n) | encode_vm(m)); 1304} 1305 1306static void tcg_out_vmovi(TCGContext *s, TCGReg rd, 1307 int q, int op, int cmode, uint8_t imm8) 1308{ 1309 tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5) 1310 | (cmode << 8) | extract32(imm8, 0, 4) 1311 | (extract32(imm8, 4, 3) << 16) 1312 | (extract32(imm8, 7, 1) << 24)); 1313} 1314 1315static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q, 1316 TCGReg rd, TCGReg rm, int l_imm6) 1317{ 1318 tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) | 1319 (extract32(l_imm6, 6, 1) << 7) | 1320 (extract32(l_imm6, 0, 6) << 16)); 1321} 1322 1323static void tcg_out_vldst(TCGContext *s, ARMInsn insn, 1324 TCGReg rd, TCGReg rn, int offset) 1325{ 1326 if (offset != 0) { 1327 if (check_fit_imm(offset) || check_fit_imm(-offset)) { 1328 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1329 TCG_REG_TMP, rn, offset, true); 1330 } else { 1331 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); 1332 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1333 TCG_REG_TMP, TCG_REG_TMP, rn, 0); 1334 } 1335 rn = TCG_REG_TMP; 1336 } 1337 tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); 1338} 1339 1340#ifdef CONFIG_SOFTMMU 1341/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, 1342 * int mmu_idx, uintptr_t ra) 1343 */ 1344static void * const qemu_ld_helpers[MO_SSIZE + 1] = { 1345 [MO_UB] = helper_ret_ldub_mmu, 1346 [MO_SB] = helper_ret_ldsb_mmu, 1347#if HOST_BIG_ENDIAN 1348 [MO_UW] = helper_be_lduw_mmu, 1349 [MO_UL] = helper_be_ldul_mmu, 1350 [MO_UQ] = helper_be_ldq_mmu, 1351 [MO_SW] = helper_be_ldsw_mmu, 1352 [MO_SL] = helper_be_ldul_mmu, 1353#else 1354 [MO_UW] = helper_le_lduw_mmu, 1355 [MO_UL] = helper_le_ldul_mmu, 1356 [MO_UQ] = helper_le_ldq_mmu, 1357 [MO_SW] = helper_le_ldsw_mmu, 1358 [MO_SL] = helper_le_ldul_mmu, 1359#endif 1360}; 1361 1362/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, 1363 * uintxx_t val, int mmu_idx, uintptr_t ra) 1364 */ 1365static void * const qemu_st_helpers[MO_SIZE + 1] = { 1366 [MO_8] = helper_ret_stb_mmu, 1367#if HOST_BIG_ENDIAN 1368 [MO_16] = helper_be_stw_mmu, 1369 [MO_32] = helper_be_stl_mmu, 1370 [MO_64] = helper_be_stq_mmu, 1371#else 1372 [MO_16] = helper_le_stw_mmu, 1373 [MO_32] 
= helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 are real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)               \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)             \
{                                                                         \
    if (argreg < 4) {                                                     \
        MOV_ARG(s, COND_AL, argreg, arg);                                 \
    } else {                                                              \
        int ofs = (argreg - 4) * 4;                                       \
        EXT_ARG;                                                          \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);           \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);        \
    }                                                                     \
    return argreg + 1;                                                    \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u_cond,
    (tcg_out_ext8u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u_cond,
    (tcg_out_ext16u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP. */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
    TCGReg t_addr;

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0.
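       The mask loaded into R0 is (n_entries - 1) << CPU_TLB_ENTRY_BITS, so
       ANDing it with the address shifted right by (TARGET_PAGE_BITS -
       CPU_TLB_ENTRY_BITS) yields the byte offset of the entry within the
       table in a single instruction.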
*/ 1457 tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, 1458 SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); 1459 1460 /* 1461 * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1462 * Load the tlb comparator into R2/R3 and the fast path addend into R1. 1463 */ 1464 if (cmp_off == 0) { 1465 if (TARGET_LONG_BITS == 64) { 1466 tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1467 } else { 1468 tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1469 } 1470 } else { 1471 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1472 TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); 1473 if (TARGET_LONG_BITS == 64) { 1474 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1475 } else { 1476 tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1477 } 1478 } 1479 1480 /* Load the tlb addend. */ 1481 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, 1482 offsetof(CPUTLBEntry, addend)); 1483 1484 /* 1485 * Check alignment, check comparators. 1486 * Do this in 2-4 insns. Use MOVW for v7, if possible, 1487 * to reduce the number of sequential conditional instructions. 1488 * Almost all guests have at least 4k pages, which means that we need 1489 * to clear at least 9 bits even for an 8-byte memory, which means it 1490 * isn't worth checking for an immediate operand for BIC. 1491 * 1492 * For unaligned accesses, test the page of the last unit of alignment. 1493 * This leaves the least significant alignment bits unchanged, and of 1494 * course must be zero. 1495 */ 1496 t_addr = addrlo; 1497 if (a_mask < s_mask) { 1498 t_addr = TCG_REG_R0; 1499 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, 1500 addrlo, s_mask - a_mask); 1501 } 1502 if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { 1503 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); 1504 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1505 t_addr, TCG_REG_TMP, 0); 1506 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); 1507 } else { 1508 if (a_mask) { 1509 tcg_debug_assert(a_mask <= 0xff); 1510 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1511 } 1512 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, 1513 SHIFT_IMM_LSR(TARGET_PAGE_BITS)); 1514 tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 1515 0, TCG_REG_R2, TCG_REG_TMP, 1516 SHIFT_IMM_LSL(TARGET_PAGE_BITS)); 1517 } 1518 1519 if (TARGET_LONG_BITS == 64) { 1520 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); 1521 } 1522 1523 return TCG_REG_R1; 1524} 1525 1526/* Record the context of a call to the out of line helper code for the slow 1527 path for a load or store, so that we can later generate the correct 1528 helper code. 
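   label_ptr is the conditional branch emitted in the fast path; it is
   retargeted to the slow path with reloc_pc24() once that code is emitted.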
*/ 1529static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, 1530 TCGReg datalo, TCGReg datahi, TCGReg addrlo, 1531 TCGReg addrhi, tcg_insn_unit *raddr, 1532 tcg_insn_unit *label_ptr) 1533{ 1534 TCGLabelQemuLdst *label = new_ldst_label(s); 1535 1536 label->is_ld = is_ld; 1537 label->oi = oi; 1538 label->datalo_reg = datalo; 1539 label->datahi_reg = datahi; 1540 label->addrlo_reg = addrlo; 1541 label->addrhi_reg = addrhi; 1542 label->raddr = tcg_splitwx_to_rx(raddr); 1543 label->label_ptr[0] = label_ptr; 1544} 1545 1546static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1547{ 1548 TCGReg argreg; 1549 MemOpIdx oi = lb->oi; 1550 MemOp opc = get_memop(oi); 1551 1552 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1553 return false; 1554 } 1555 1556 argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0); 1557 if (TARGET_LONG_BITS == 64) { 1558 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); 1559 } else { 1560 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); 1561 } 1562 argreg = tcg_out_arg_imm32(s, argreg, oi); 1563 argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); 1564 1565 /* Use the canonical unsigned helpers and minimize icache usage. */ 1566 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); 1567 1568 if ((opc & MO_SIZE) == MO_64) { 1569 TCGMovExtend ext[2] = { 1570 { .dst = lb->datalo_reg, .dst_type = TCG_TYPE_I32, 1571 .src = TCG_REG_R0, .src_type = TCG_TYPE_I32, .src_ext = MO_UL }, 1572 { .dst = lb->datahi_reg, .dst_type = TCG_TYPE_I32, 1573 .src = TCG_REG_R1, .src_type = TCG_TYPE_I32, .src_ext = MO_UL }, 1574 }; 1575 tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP); 1576 } else { 1577 tcg_out_movext(s, TCG_TYPE_I32, lb->datalo_reg, 1578 TCG_TYPE_I32, opc & MO_SSIZE, TCG_REG_R0); 1579 } 1580 1581 tcg_out_goto(s, COND_AL, lb->raddr); 1582 return true; 1583} 1584 1585static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1586{ 1587 TCGReg argreg, datalo, datahi; 1588 MemOpIdx oi = lb->oi; 1589 MemOp opc = get_memop(oi); 1590 1591 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1592 return false; 1593 } 1594 1595 argreg = TCG_REG_R0; 1596 argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0); 1597 if (TARGET_LONG_BITS == 64) { 1598 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); 1599 } else { 1600 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); 1601 } 1602 1603 datalo = lb->datalo_reg; 1604 datahi = lb->datahi_reg; 1605 switch (opc & MO_SIZE) { 1606 case MO_8: 1607 argreg = tcg_out_arg_reg8(s, argreg, datalo); 1608 break; 1609 case MO_16: 1610 argreg = tcg_out_arg_reg16(s, argreg, datalo); 1611 break; 1612 case MO_32: 1613 default: 1614 argreg = tcg_out_arg_reg32(s, argreg, datalo); 1615 break; 1616 case MO_64: 1617 argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi); 1618 break; 1619 } 1620 1621 argreg = tcg_out_arg_imm32(s, argreg, oi); 1622 argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); 1623 1624 /* Tail-call to the helper, which will return to the fast path. 
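       The fast path reached this stub via BLNE, so LR still holds the
       fast-path resume address (also passed above as the ra argument);
       branching rather than calling lets the helper return straight there.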
*/ 1625 tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); 1626 return true; 1627} 1628#else 1629 1630static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, 1631 TCGReg addrhi, unsigned a_bits) 1632{ 1633 unsigned a_mask = (1 << a_bits) - 1; 1634 TCGLabelQemuLdst *label = new_ldst_label(s); 1635 1636 label->is_ld = is_ld; 1637 label->addrlo_reg = addrlo; 1638 label->addrhi_reg = addrhi; 1639 1640 /* We are expecting a_bits to max out at 7, and can easily support 8. */ 1641 tcg_debug_assert(a_mask <= 0xff); 1642 /* tst addr, #mask */ 1643 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1644 1645 /* blne slow_path */ 1646 label->label_ptr[0] = s->code_ptr; 1647 tcg_out_bl_imm(s, COND_NE, 0); 1648 1649 label->raddr = tcg_splitwx_to_rx(s->code_ptr); 1650} 1651 1652static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) 1653{ 1654 if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1655 return false; 1656 } 1657 1658 if (TARGET_LONG_BITS == 64) { 1659 /* 64-bit target address is aligned into R2:R3. */ 1660 TCGMovExtend ext[2] = { 1661 { .dst = TCG_REG_R2, .dst_type = TCG_TYPE_I32, 1662 .src = l->addrlo_reg, 1663 .src_type = TCG_TYPE_I32, .src_ext = MO_UL }, 1664 { .dst = TCG_REG_R3, .dst_type = TCG_TYPE_I32, 1665 .src = l->addrhi_reg, 1666 .src_type = TCG_TYPE_I32, .src_ext = MO_UL }, 1667 }; 1668 tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP); 1669 } else { 1670 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg); 1671 } 1672 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0); 1673 1674 /* 1675 * Tail call to the helper, with the return address back inline, 1676 * just for the clarity of the debugging traceback -- the helper 1677 * cannot return. We have used BLNE to arrive here, so LR is 1678 * already set. 1679 */ 1680 tcg_out_goto(s, COND_AL, (const void *) 1681 (l->is_ld ? helper_unaligned_ld : helper_unaligned_st)); 1682 return true; 1683} 1684 1685static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1686{ 1687 return tcg_out_fail_alignment(s, l); 1688} 1689 1690static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1691{ 1692 return tcg_out_fail_alignment(s, l); 1693} 1694#endif /* SOFTMMU */ 1695 1696static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, 1697 TCGReg datalo, TCGReg datahi, 1698 TCGReg addrlo, TCGReg addend, 1699 bool scratch_addend) 1700{ 1701 /* Byte swapping is left to middle-end expansion. */ 1702 tcg_debug_assert((opc & MO_BSWAP) == 0); 1703 1704 switch (opc & MO_SSIZE) { 1705 case MO_UB: 1706 tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend); 1707 break; 1708 case MO_SB: 1709 tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend); 1710 break; 1711 case MO_UW: 1712 tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); 1713 break; 1714 case MO_SW: 1715 tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend); 1716 break; 1717 case MO_UL: 1718 tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); 1719 break; 1720 case MO_UQ: 1721 /* We used pair allocation for datalo, so already should be aligned. */ 1722 tcg_debug_assert((datalo & 1) == 0); 1723 tcg_debug_assert(datahi == datalo + 1); 1724 /* LDRD requires alignment; double-check that. */ 1725 if (get_alignment_bits(opc) >= MO_64) { 1726 /* 1727 * Rm (the second address op) must not overlap Rt or Rt + 1. 1728 * Since datalo is aligned, we can simplify the test via alignment. 1729 * Flip the two address arguments if that works. 
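             * (datalo is even and datahi == datalo + 1, so comparing
             * (addend & ~1) against datalo checks against both at once.)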
1730 */ 1731 if ((addend & ~1) != datalo) { 1732 tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); 1733 break; 1734 } 1735 if ((addrlo & ~1) != datalo) { 1736 tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo); 1737 break; 1738 } 1739 } 1740 if (scratch_addend) { 1741 tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo); 1742 tcg_out_ld32_12(s, COND_AL, datahi, addend, 4); 1743 } else { 1744 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP, 1745 addend, addrlo, SHIFT_IMM_LSL(0)); 1746 tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0); 1747 tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4); 1748 } 1749 break; 1750 default: 1751 g_assert_not_reached(); 1752 } 1753} 1754 1755#ifndef CONFIG_SOFTMMU 1756static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1757 TCGReg datahi, TCGReg addrlo) 1758{ 1759 /* Byte swapping is left to middle-end expansion. */ 1760 tcg_debug_assert((opc & MO_BSWAP) == 0); 1761 1762 switch (opc & MO_SSIZE) { 1763 case MO_UB: 1764 tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0); 1765 break; 1766 case MO_SB: 1767 tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0); 1768 break; 1769 case MO_UW: 1770 tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); 1771 break; 1772 case MO_SW: 1773 tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0); 1774 break; 1775 case MO_UL: 1776 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); 1777 break; 1778 case MO_UQ: 1779 /* We used pair allocation for datalo, so already should be aligned. */ 1780 tcg_debug_assert((datalo & 1) == 0); 1781 tcg_debug_assert(datahi == datalo + 1); 1782 /* LDRD requires alignment; double-check that. */ 1783 if (get_alignment_bits(opc) >= MO_64) { 1784 tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); 1785 } else if (datalo == addrlo) { 1786 tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); 1787 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); 1788 } else { 1789 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); 1790 tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); 1791 } 1792 break; 1793 default: 1794 g_assert_not_reached(); 1795 } 1796} 1797#endif 1798 1799static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) 1800{ 1801 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); 1802 MemOpIdx oi; 1803 MemOp opc; 1804#ifdef CONFIG_SOFTMMU 1805 int mem_index; 1806 TCGReg addend; 1807 tcg_insn_unit *label_ptr; 1808#else 1809 unsigned a_bits; 1810#endif 1811 1812 datalo = *args++; 1813 datahi = (is64 ? *args++ : 0); 1814 addrlo = *args++; 1815 addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); 1816 oi = *args++; 1817 opc = get_memop(oi); 1818 1819#ifdef CONFIG_SOFTMMU 1820 mem_index = get_mmuidx(oi); 1821 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1); 1822 1823 /* This a conditional BL only to load a pointer within this opcode into LR 1824 for the slow path. We will not be using the value for a tail call. 
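       The BL is conditional on the NE result of the TLB comparison above
       and is retargeted to the slow path via label_ptr / reloc_pc24().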
*/ 1825 label_ptr = s->code_ptr; 1826 tcg_out_bl_imm(s, COND_NE, 0); 1827 1828 tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); 1829 1830 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, 1831 s->code_ptr, label_ptr); 1832#else /* !CONFIG_SOFTMMU */ 1833 a_bits = get_alignment_bits(opc); 1834 if (a_bits) { 1835 tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); 1836 } 1837 if (guest_base) { 1838 tcg_out_qemu_ld_index(s, opc, datalo, datahi, 1839 addrlo, TCG_REG_GUEST_BASE, false); 1840 } else { 1841 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); 1842 } 1843#endif 1844} 1845 1846static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, 1847 TCGReg datalo, TCGReg datahi, 1848 TCGReg addrlo, TCGReg addend, 1849 bool scratch_addend) 1850{ 1851 /* Byte swapping is left to middle-end expansion. */ 1852 tcg_debug_assert((opc & MO_BSWAP) == 0); 1853 1854 switch (opc & MO_SIZE) { 1855 case MO_8: 1856 tcg_out_st8_r(s, cond, datalo, addrlo, addend); 1857 break; 1858 case MO_16: 1859 tcg_out_st16_r(s, cond, datalo, addrlo, addend); 1860 break; 1861 case MO_32: 1862 tcg_out_st32_r(s, cond, datalo, addrlo, addend); 1863 break; 1864 case MO_64: 1865 /* We used pair allocation for datalo, so already should be aligned. */ 1866 tcg_debug_assert((datalo & 1) == 0); 1867 tcg_debug_assert(datahi == datalo + 1); 1868 /* STRD requires alignment; double-check that. */ 1869 if (get_alignment_bits(opc) >= MO_64) { 1870 tcg_out_strd_r(s, cond, datalo, addrlo, addend); 1871 } else if (scratch_addend) { 1872 tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); 1873 tcg_out_st32_12(s, cond, datahi, addend, 4); 1874 } else { 1875 tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP, 1876 addend, addrlo, SHIFT_IMM_LSL(0)); 1877 tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0); 1878 tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4); 1879 } 1880 break; 1881 default: 1882 g_assert_not_reached(); 1883 } 1884} 1885 1886#ifndef CONFIG_SOFTMMU 1887static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1888 TCGReg datahi, TCGReg addrlo) 1889{ 1890 /* Byte swapping is left to middle-end expansion. */ 1891 tcg_debug_assert((opc & MO_BSWAP) == 0); 1892 1893 switch (opc & MO_SIZE) { 1894 case MO_8: 1895 tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0); 1896 break; 1897 case MO_16: 1898 tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0); 1899 break; 1900 case MO_32: 1901 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); 1902 break; 1903 case MO_64: 1904 /* We used pair allocation for datalo, so already should be aligned. */ 1905 tcg_debug_assert((datalo & 1) == 0); 1906 tcg_debug_assert(datahi == datalo + 1); 1907 /* STRD requires alignment; double-check that. */ 1908 if (get_alignment_bits(opc) >= MO_64) { 1909 tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); 1910 } else { 1911 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); 1912 tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4); 1913 } 1914 break; 1915 default: 1916 g_assert_not_reached(); 1917 } 1918} 1919#endif 1920 1921static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) 1922{ 1923 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); 1924 MemOpIdx oi; 1925 MemOp opc; 1926#ifdef CONFIG_SOFTMMU 1927 int mem_index; 1928 TCGReg addend; 1929 tcg_insn_unit *label_ptr; 1930#else 1931 unsigned a_bits; 1932#endif 1933 1934 datalo = *args++; 1935 datahi = (is64 ? *args++ : 0); 1936 addrlo = *args++; 1937 addrhi = (TARGET_LONG_BITS == 64 ? 
*args++ : 0); 1938 oi = *args++; 1939 opc = get_memop(oi); 1940 1941#ifdef CONFIG_SOFTMMU 1942 mem_index = get_mmuidx(oi); 1943 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); 1944 1945 tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, 1946 addrlo, addend, true); 1947 1948 /* The conditional call must come last, as we're going to return here. */ 1949 label_ptr = s->code_ptr; 1950 tcg_out_bl_imm(s, COND_NE, 0); 1951 1952 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, 1953 s->code_ptr, label_ptr); 1954#else /* !CONFIG_SOFTMMU */ 1955 a_bits = get_alignment_bits(opc); 1956 if (a_bits) { 1957 tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); 1958 } 1959 if (guest_base) { 1960 tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, 1961 addrlo, TCG_REG_GUEST_BASE, false); 1962 } else { 1963 tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); 1964 } 1965#endif 1966} 1967 1968static void tcg_out_epilogue(TCGContext *s); 1969 1970static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) 1971{ 1972 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg); 1973 tcg_out_epilogue(s); 1974} 1975 1976static void tcg_out_goto_tb(TCGContext *s, int which) 1977{ 1978 uintptr_t i_addr; 1979 intptr_t i_disp; 1980 1981 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1982 set_jmp_insn_offset(s, which); 1983 tcg_out32(s, INSN_NOP); 1984 1985 /* When branch is out of range, fall through to indirect. */ 1986 i_addr = get_jmp_target_addr(s, which); 1987 i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8; 1988 tcg_debug_assert(i_disp < 0); 1989 if (i_disp >= -0xfff) { 1990 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp); 1991 } else { 1992 /* 1993 * The TB is close, but outside the 12 bits addressable by 1994 * the load. We can extend this to 20 bits with a sub of a 1995 * shifted immediate from pc. 1996 */ 1997 int h = -i_disp; 1998 int l = h & 0xfff; 1999 2000 h = encode_imm_nofail(h - l); 2001 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h); 2002 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l); 2003 } 2004 set_jmp_reset_offset(s, which); 2005} 2006 2007void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 2008 uintptr_t jmp_rx, uintptr_t jmp_rw) 2009{ 2010 uintptr_t addr = tb->jmp_target_addr[n]; 2011 ptrdiff_t offset = addr - (jmp_rx + 8); 2012 tcg_insn_unit insn; 2013 2014 /* Either directly branch, or fall through to indirect branch. 
*/ 2015 if (offset == sextract64(offset, 0, 26)) { 2016 /* B <addr> */ 2017 insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2); 2018 } else { 2019 insn = INSN_NOP; 2020 } 2021 2022 qatomic_set((uint32_t *)jmp_rw, insn); 2023 flush_idcache_range(jmp_rx, jmp_rw, 4); 2024} 2025 2026static void tcg_out_op(TCGContext *s, TCGOpcode opc, 2027 const TCGArg args[TCG_MAX_OP_ARGS], 2028 const int const_args[TCG_MAX_OP_ARGS]) 2029{ 2030 TCGArg a0, a1, a2, a3, a4, a5; 2031 int c; 2032 2033 switch (opc) { 2034 case INDEX_op_goto_ptr: 2035 tcg_out_b_reg(s, COND_AL, args[0]); 2036 break; 2037 case INDEX_op_br: 2038 tcg_out_goto_label(s, COND_AL, arg_label(args[0])); 2039 break; 2040 2041 case INDEX_op_ld8u_i32: 2042 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); 2043 break; 2044 case INDEX_op_ld8s_i32: 2045 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); 2046 break; 2047 case INDEX_op_ld16u_i32: 2048 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); 2049 break; 2050 case INDEX_op_ld16s_i32: 2051 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); 2052 break; 2053 case INDEX_op_ld_i32: 2054 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); 2055 break; 2056 case INDEX_op_st8_i32: 2057 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); 2058 break; 2059 case INDEX_op_st16_i32: 2060 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); 2061 break; 2062 case INDEX_op_st_i32: 2063 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); 2064 break; 2065 2066 case INDEX_op_movcond_i32: 2067 /* Constraints mean that v2 is always in the same register as dest, 2068 * so we only need to do "if condition passed, move v1 to dest". 2069 */ 2070 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, 2071 args[1], args[2], const_args[2]); 2072 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV, 2073 ARITH_MVN, args[0], 0, args[3], const_args[3]); 2074 break; 2075 case INDEX_op_add_i32: 2076 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 2077 args[0], args[1], args[2], const_args[2]); 2078 break; 2079 case INDEX_op_sub_i32: 2080 if (const_args[1]) { 2081 if (const_args[2]) { 2082 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]); 2083 } else { 2084 tcg_out_dat_rI(s, COND_AL, ARITH_RSB, 2085 args[0], args[2], args[1], 1); 2086 } 2087 } else { 2088 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD, 2089 args[0], args[1], args[2], const_args[2]); 2090 } 2091 break; 2092 case INDEX_op_and_i32: 2093 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC, 2094 args[0], args[1], args[2], const_args[2]); 2095 break; 2096 case INDEX_op_andc_i32: 2097 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND, 2098 args[0], args[1], args[2], const_args[2]); 2099 break; 2100 case INDEX_op_or_i32: 2101 c = ARITH_ORR; 2102 goto gen_arith; 2103 case INDEX_op_xor_i32: 2104 c = ARITH_EOR; 2105 /* Fall through. 
*/ 2106 gen_arith: 2107 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]); 2108 break; 2109 case INDEX_op_add2_i32: 2110 a0 = args[0], a1 = args[1], a2 = args[2]; 2111 a3 = args[3], a4 = args[4], a5 = args[5]; 2112 if (a0 == a3 || (a0 == a5 && !const_args[5])) { 2113 a0 = TCG_REG_TMP; 2114 } 2115 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, 2116 a0, a2, a4, const_args[4]); 2117 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, 2118 a1, a3, a5, const_args[5]); 2119 tcg_out_mov_reg(s, COND_AL, args[0], a0); 2120 break; 2121 case INDEX_op_sub2_i32: 2122 a0 = args[0], a1 = args[1], a2 = args[2]; 2123 a3 = args[3], a4 = args[4], a5 = args[5]; 2124 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { 2125 a0 = TCG_REG_TMP; 2126 } 2127 if (const_args[2]) { 2128 if (const_args[4]) { 2129 tcg_out_movi32(s, COND_AL, a0, a4); 2130 a4 = a0; 2131 } 2132 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); 2133 } else { 2134 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, 2135 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); 2136 } 2137 if (const_args[3]) { 2138 if (const_args[5]) { 2139 tcg_out_movi32(s, COND_AL, a1, a5); 2140 a5 = a1; 2141 } 2142 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); 2143 } else { 2144 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, 2145 a1, a3, a5, const_args[5]); 2146 } 2147 tcg_out_mov_reg(s, COND_AL, args[0], a0); 2148 break; 2149 case INDEX_op_neg_i32: 2150 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); 2151 break; 2152 case INDEX_op_not_i32: 2153 tcg_out_dat_reg(s, COND_AL, 2154 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); 2155 break; 2156 case INDEX_op_mul_i32: 2157 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); 2158 break; 2159 case INDEX_op_mulu2_i32: 2160 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); 2161 break; 2162 case INDEX_op_muls2_i32: 2163 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]); 2164 break; 2165 /* XXX: Perhaps args[2] & 0x1f is wrong */ 2166 case INDEX_op_shl_i32: 2167 c = const_args[2] ? 2168 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); 2169 goto gen_shift32; 2170 case INDEX_op_shr_i32: 2171 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : 2172 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); 2173 goto gen_shift32; 2174 case INDEX_op_sar_i32: 2175 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : 2176 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); 2177 goto gen_shift32; 2178 case INDEX_op_rotr_i32: 2179 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) : 2180 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]); 2181 /* Fall through. */ 2182 gen_shift32: 2183 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); 2184 break; 2185 2186 case INDEX_op_rotl_i32: 2187 if (const_args[2]) { 2188 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 2189 ((0x20 - args[2]) & 0x1f) ? 
2190 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) : 2191 SHIFT_IMM_LSL(0)); 2192 } else { 2193 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20); 2194 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 2195 SHIFT_REG_ROR(TCG_REG_TMP)); 2196 } 2197 break; 2198 2199 case INDEX_op_ctz_i32: 2200 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); 2201 a1 = TCG_REG_TMP; 2202 goto do_clz; 2203 2204 case INDEX_op_clz_i32: 2205 a1 = args[1]; 2206 do_clz: 2207 a0 = args[0]; 2208 a2 = args[2]; 2209 c = const_args[2]; 2210 if (c && a2 == 32) { 2211 tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); 2212 break; 2213 } 2214 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); 2215 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); 2216 if (c || a0 != a2) { 2217 tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); 2218 } 2219 break; 2220 2221 case INDEX_op_brcond_i32: 2222 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, 2223 args[0], args[1], const_args[1]); 2224 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], 2225 arg_label(args[3])); 2226 break; 2227 case INDEX_op_setcond_i32: 2228 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, 2229 args[1], args[2], const_args[2]); 2230 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]], 2231 ARITH_MOV, args[0], 0, 1); 2232 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], 2233 ARITH_MOV, args[0], 0, 0); 2234 break; 2235 2236 case INDEX_op_brcond2_i32: 2237 c = tcg_out_cmp2(s, args, const_args); 2238 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); 2239 break; 2240 case INDEX_op_setcond2_i32: 2241 c = tcg_out_cmp2(s, args + 1, const_args + 1); 2242 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); 2243 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2244 ARITH_MOV, args[0], 0, 0); 2245 break; 2246 2247 case INDEX_op_qemu_ld_i32: 2248 tcg_out_qemu_ld(s, args, 0); 2249 break; 2250 case INDEX_op_qemu_ld_i64: 2251 tcg_out_qemu_ld(s, args, 1); 2252 break; 2253 case INDEX_op_qemu_st_i32: 2254 tcg_out_qemu_st(s, args, 0); 2255 break; 2256 case INDEX_op_qemu_st_i64: 2257 tcg_out_qemu_st(s, args, 1); 2258 break; 2259 2260 case INDEX_op_bswap16_i32: 2261 tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]); 2262 break; 2263 case INDEX_op_bswap32_i32: 2264 tcg_out_bswap32(s, COND_AL, args[0], args[1]); 2265 break; 2266 2267 case INDEX_op_deposit_i32: 2268 tcg_out_deposit(s, COND_AL, args[0], args[2], 2269 args[3], args[4], const_args[2]); 2270 break; 2271 case INDEX_op_extract_i32: 2272 tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); 2273 break; 2274 case INDEX_op_sextract_i32: 2275 tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); 2276 break; 2277 case INDEX_op_extract2_i32: 2278 /* ??? These optimizations vs zero should be generic. */ 2279 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ 2280 if (const_args[1]) { 2281 if (const_args[2]) { 2282 tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); 2283 } else { 2284 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2285 args[2], SHIFT_IMM_LSL(32 - args[3])); 2286 } 2287 } else if (const_args[2]) { 2288 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2289 args[1], SHIFT_IMM_LSR(args[3])); 2290 } else { 2291 /* We can do extract2 in 2 insns, vs the 3 required otherwise.
*/ 2292 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, 2293 args[2], SHIFT_IMM_LSL(32 - args[3])); 2294 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, 2295 args[1], SHIFT_IMM_LSR(args[3])); 2296 } 2297 break; 2298 2299 case INDEX_op_div_i32: 2300 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); 2301 break; 2302 case INDEX_op_divu_i32: 2303 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); 2304 break; 2305 2306 case INDEX_op_mb: 2307 tcg_out_mb(s, args[0]); 2308 break; 2309 2310 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 2311 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 2312 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 2313 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 2314 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 2315 case INDEX_op_ext8u_i32: 2316 case INDEX_op_ext16s_i32: 2317 case INDEX_op_ext16u_i32: 2318 default: 2319 g_assert_not_reached(); 2320 } 2321} 2322 2323static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2324{ 2325 switch (op) { 2326 case INDEX_op_goto_ptr: 2327 return C_O0_I1(r); 2328 2329 case INDEX_op_ld8u_i32: 2330 case INDEX_op_ld8s_i32: 2331 case INDEX_op_ld16u_i32: 2332 case INDEX_op_ld16s_i32: 2333 case INDEX_op_ld_i32: 2334 case INDEX_op_neg_i32: 2335 case INDEX_op_not_i32: 2336 case INDEX_op_bswap16_i32: 2337 case INDEX_op_bswap32_i32: 2338 case INDEX_op_ext8s_i32: 2339 case INDEX_op_ext16s_i32: 2340 case INDEX_op_ext16u_i32: 2341 case INDEX_op_extract_i32: 2342 case INDEX_op_sextract_i32: 2343 return C_O1_I1(r, r); 2344 2345 case INDEX_op_st8_i32: 2346 case INDEX_op_st16_i32: 2347 case INDEX_op_st_i32: 2348 return C_O0_I2(r, r); 2349 2350 case INDEX_op_add_i32: 2351 case INDEX_op_sub_i32: 2352 case INDEX_op_setcond_i32: 2353 return C_O1_I2(r, r, rIN); 2354 2355 case INDEX_op_and_i32: 2356 case INDEX_op_andc_i32: 2357 case INDEX_op_clz_i32: 2358 case INDEX_op_ctz_i32: 2359 return C_O1_I2(r, r, rIK); 2360 2361 case INDEX_op_mul_i32: 2362 case INDEX_op_div_i32: 2363 case INDEX_op_divu_i32: 2364 return C_O1_I2(r, r, r); 2365 2366 case INDEX_op_mulu2_i32: 2367 case INDEX_op_muls2_i32: 2368 return C_O2_I2(r, r, r, r); 2369 2370 case INDEX_op_or_i32: 2371 case INDEX_op_xor_i32: 2372 return C_O1_I2(r, r, rI); 2373 2374 case INDEX_op_shl_i32: 2375 case INDEX_op_shr_i32: 2376 case INDEX_op_sar_i32: 2377 case INDEX_op_rotl_i32: 2378 case INDEX_op_rotr_i32: 2379 return C_O1_I2(r, r, ri); 2380 2381 case INDEX_op_brcond_i32: 2382 return C_O0_I2(r, rIN); 2383 case INDEX_op_deposit_i32: 2384 return C_O1_I2(r, 0, rZ); 2385 case INDEX_op_extract2_i32: 2386 return C_O1_I2(r, rZ, rZ); 2387 case INDEX_op_movcond_i32: 2388 return C_O1_I4(r, r, rIN, rIK, 0); 2389 case INDEX_op_add2_i32: 2390 return C_O2_I4(r, r, r, r, rIN, rIK); 2391 case INDEX_op_sub2_i32: 2392 return C_O2_I4(r, r, rI, rI, rIN, rIK); 2393 case INDEX_op_brcond2_i32: 2394 return C_O0_I4(r, r, rI, rI); 2395 case INDEX_op_setcond2_i32: 2396 return C_O1_I4(r, r, r, rI, rI); 2397 2398 case INDEX_op_qemu_ld_i32: 2399 return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l); 2400 case INDEX_op_qemu_ld_i64: 2401 return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l); 2402 case INDEX_op_qemu_st_i32: 2403 return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s); 2404 case INDEX_op_qemu_st_i64: 2405 return TARGET_LONG_BITS == 32 ? 
C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s); 2406 2407 case INDEX_op_st_vec: 2408 return C_O0_I2(w, r); 2409 case INDEX_op_ld_vec: 2410 case INDEX_op_dupm_vec: 2411 return C_O1_I1(w, r); 2412 case INDEX_op_dup_vec: 2413 return C_O1_I1(w, wr); 2414 case INDEX_op_abs_vec: 2415 case INDEX_op_neg_vec: 2416 case INDEX_op_not_vec: 2417 case INDEX_op_shli_vec: 2418 case INDEX_op_shri_vec: 2419 case INDEX_op_sari_vec: 2420 return C_O1_I1(w, w); 2421 case INDEX_op_dup2_vec: 2422 case INDEX_op_add_vec: 2423 case INDEX_op_mul_vec: 2424 case INDEX_op_smax_vec: 2425 case INDEX_op_smin_vec: 2426 case INDEX_op_ssadd_vec: 2427 case INDEX_op_sssub_vec: 2428 case INDEX_op_sub_vec: 2429 case INDEX_op_umax_vec: 2430 case INDEX_op_umin_vec: 2431 case INDEX_op_usadd_vec: 2432 case INDEX_op_ussub_vec: 2433 case INDEX_op_xor_vec: 2434 case INDEX_op_arm_sshl_vec: 2435 case INDEX_op_arm_ushl_vec: 2436 return C_O1_I2(w, w, w); 2437 case INDEX_op_arm_sli_vec: 2438 return C_O1_I2(w, 0, w); 2439 case INDEX_op_or_vec: 2440 case INDEX_op_andc_vec: 2441 return C_O1_I2(w, w, wO); 2442 case INDEX_op_and_vec: 2443 case INDEX_op_orc_vec: 2444 return C_O1_I2(w, w, wV); 2445 case INDEX_op_cmp_vec: 2446 return C_O1_I2(w, w, wZ); 2447 case INDEX_op_bitsel_vec: 2448 return C_O1_I3(w, w, w, w); 2449 default: 2450 g_assert_not_reached(); 2451 } 2452} 2453 2454static void tcg_target_init(TCGContext *s) 2455{ 2456 /* 2457 * Only probe for the platform and capabilities if we haven't already 2458 * determined maximum values at compile time. 2459 */ 2460#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2461 { 2462 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2463#ifndef use_idiv_instructions 2464 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2465#endif 2466#ifndef use_neon_instructions 2467 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2468#endif 2469 } 2470#endif 2471 2472 if (__ARM_ARCH < 7) { 2473 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2474 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2475 arm_arch = pl[1] - '0'; 2476 } 2477 2478 if (arm_arch < 6) { 2479 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2480 exit(EXIT_FAILURE); 2481 } 2482 } 2483 2484 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2485 2486 tcg_target_call_clobber_regs = 0; 2487 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2488 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2489 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2490 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2491 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2492 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2493 2494 if (use_neon_instructions) { 2495 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2496 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2497 2498 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2499 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2500 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2501 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2502 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2503 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2504 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2505 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2506 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2507 
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2508 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2509 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2510 } 2511 2512 s->reserved_regs = 0; 2513 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2514 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2515 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2516 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2517} 2518 2519static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2520 TCGReg arg1, intptr_t arg2) 2521{ 2522 switch (type) { 2523 case TCG_TYPE_I32: 2524 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2525 return; 2526 case TCG_TYPE_V64: 2527 /* regs 1; size 8; align 8 */ 2528 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2529 return; 2530 case TCG_TYPE_V128: 2531 /* 2532 * We have only 8-byte alignment for the stack per the ABI. 2533 * Rather than dynamically re-align the stack, it's easier 2534 * to simply not request alignment beyond that. So: 2535 * regs 2; size 8; align 8 2536 */ 2537 tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2); 2538 return; 2539 default: 2540 g_assert_not_reached(); 2541 } 2542} 2543 2544static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 2545 TCGReg arg1, intptr_t arg2) 2546{ 2547 switch (type) { 2548 case TCG_TYPE_I32: 2549 tcg_out_st32(s, COND_AL, arg, arg1, arg2); 2550 return; 2551 case TCG_TYPE_V64: 2552 /* regs 1; size 8; align 8 */ 2553 tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); 2554 return; 2555 case TCG_TYPE_V128: 2556 /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */ 2557 tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2); 2558 return; 2559 default: 2560 g_assert_not_reached(); 2561 } 2562} 2563 2564static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 2565 TCGReg base, intptr_t ofs) 2566{ 2567 return false; 2568} 2569 2570static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 2571{ 2572 if (ret == arg) { 2573 return true; 2574 } 2575 switch (type) { 2576 case TCG_TYPE_I32: 2577 if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) { 2578 tcg_out_mov_reg(s, COND_AL, ret, arg); 2579 return true; 2580 } 2581 return false; 2582 2583 case TCG_TYPE_V64: 2584 case TCG_TYPE_V128: 2585 /* "VMOV D,N" is an alias for "VORR D,N,N". */ 2586 tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg); 2587 return true; 2588 2589 default: 2590 g_assert_not_reached(); 2591 } 2592} 2593 2594static void tcg_out_movi(TCGContext *s, TCGType type, 2595 TCGReg ret, tcg_target_long arg) 2596{ 2597 tcg_debug_assert(type == TCG_TYPE_I32); 2598 tcg_debug_assert(ret < TCG_REG_Q0); 2599 tcg_out_movi32(s, COND_AL, ret, arg); 2600} 2601 2602static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 2603{ 2604 return false; 2605} 2606 2607static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 2608 tcg_target_long imm) 2609{ 2610 int enc, opc = ARITH_ADD; 2611 2612 /* All of the easiest immediates to encode are positive. */ 2613 if (imm < 0) { 2614 imm = -imm; 2615 opc = ARITH_SUB; 2616 } 2617 enc = encode_imm(imm); 2618 if (enc >= 0) { 2619 tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc); 2620 } else { 2621 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm); 2622 tcg_out_dat_reg(s, COND_AL, opc, rd, rs, 2623 TCG_REG_TMP, SHIFT_IMM_LSL(0)); 2624 } 2625} 2626 2627/* Type is always V128, with I64 elements. 
*/ 2628static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh) 2629{ 2630 /* Move high element into place first. */ 2631 /* VMOV Dd+1, Ds */ 2632 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh); 2633 /* Move low element into place; tcg_out_mov will check for nop. */ 2634 tcg_out_mov(s, TCG_TYPE_V64, rd, rl); 2635} 2636 2637static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 2638 TCGReg rd, TCGReg rs) 2639{ 2640 int q = type - TCG_TYPE_V64; 2641 2642 if (vece == MO_64) { 2643 if (type == TCG_TYPE_V128) { 2644 tcg_out_dup2_vec(s, rd, rs, rs); 2645 } else { 2646 tcg_out_mov(s, TCG_TYPE_V64, rd, rs); 2647 } 2648 } else if (rs < TCG_REG_Q0) { 2649 int b = (vece == MO_8); 2650 int e = (vece == MO_16); 2651 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) | 2652 encode_vn(rd) | (rs << 12)); 2653 } else { 2654 int imm4 = 1 << vece; 2655 tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) | 2656 encode_vd(rd) | encode_vm(rs)); 2657 } 2658 return true; 2659} 2660 2661static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 2662 TCGReg rd, TCGReg base, intptr_t offset) 2663{ 2664 if (vece == MO_64) { 2665 tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset); 2666 if (type == TCG_TYPE_V128) { 2667 tcg_out_dup2_vec(s, rd, rd, rd); 2668 } 2669 } else { 2670 int q = type - TCG_TYPE_V64; 2671 tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5), 2672 rd, base, offset); 2673 } 2674 return true; 2675} 2676 2677static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 2678 TCGReg rd, int64_t v64) 2679{ 2680 int q = type - TCG_TYPE_V64; 2681 int cmode, imm8, i; 2682 2683 /* Test all bytes equal first. */ 2684 if (vece == MO_8) { 2685 tcg_out_vmovi(s, rd, q, 0, 0xe, v64); 2686 return; 2687 } 2688 2689 /* 2690 * Test all bytes 0x00 or 0xff second. This can match cases that 2691 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. 2692 */ 2693 for (i = imm8 = 0; i < 8; i++) { 2694 uint8_t byte = v64 >> (i * 8); 2695 if (byte == 0xff) { 2696 imm8 |= 1 << i; 2697 } else if (byte != 0) { 2698 goto fail_bytes; 2699 } 2700 } 2701 tcg_out_vmovi(s, rd, q, 1, 0xe, imm8); 2702 return; 2703 fail_bytes: 2704 2705 /* 2706 * Tests for various replications. For each element width, if we 2707 * cannot find an expansion there's no point checking a larger 2708 * width because we already know by replication it cannot match. 2709 */ 2710 if (vece == MO_16) { 2711 uint16_t v16 = v64; 2712 2713 if (is_shimm16(v16, &cmode, &imm8)) { 2714 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2715 return; 2716 } 2717 if (is_shimm16(~v16, &cmode, &imm8)) { 2718 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2719 return; 2720 } 2721 2722 /* 2723 * Otherwise, all remaining constants can be loaded in two insns: 2724 * rd = v16 & 0xff, rd |= v16 & 0xff00. 2725 */ 2726 tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff); 2727 tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */ 2728 return; 2729 } 2730 2731 if (vece == MO_32) { 2732 uint32_t v32 = v64; 2733 2734 if (is_shimm32(v32, &cmode, &imm8) || 2735 is_soimm32(v32, &cmode, &imm8)) { 2736 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2737 return; 2738 } 2739 if (is_shimm32(~v32, &cmode, &imm8) || 2740 is_soimm32(~v32, &cmode, &imm8)) { 2741 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2742 return; 2743 } 2744 2745 /* 2746 * Restrict the set of constants to those we can load with 2747 * two instructions. Others we load from the pool. 
2748 */ 2749 i = is_shimm32_pair(v32, &cmode, &imm8); 2750 if (i) { 2751 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2752 tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8)); 2753 return; 2754 } 2755 i = is_shimm32_pair(~v32, &cmode, &imm8); 2756 if (i) { 2757 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2758 tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8)); 2759 return; 2760 } 2761 } 2762 2763 /* 2764 * As a last resort, load from the constant pool. 2765 */ 2766 if (!q || vece == MO_64) { 2767 new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32); 2768 /* VLDR Dd, [pc + offset] */ 2769 tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16)); 2770 if (q) { 2771 tcg_out_dup2_vec(s, rd, rd, rd); 2772 } 2773 } else { 2774 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0); 2775 /* add tmp, pc, offset */ 2776 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0); 2777 tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0); 2778 } 2779} 2780 2781static const ARMInsn vec_cmp_insn[16] = { 2782 [TCG_COND_EQ] = INSN_VCEQ, 2783 [TCG_COND_GT] = INSN_VCGT, 2784 [TCG_COND_GE] = INSN_VCGE, 2785 [TCG_COND_GTU] = INSN_VCGT_U, 2786 [TCG_COND_GEU] = INSN_VCGE_U, 2787}; 2788 2789static const ARMInsn vec_cmp0_insn[16] = { 2790 [TCG_COND_EQ] = INSN_VCEQ0, 2791 [TCG_COND_GT] = INSN_VCGT0, 2792 [TCG_COND_GE] = INSN_VCGE0, 2793 [TCG_COND_LT] = INSN_VCLT0, 2794 [TCG_COND_LE] = INSN_VCLE0, 2795}; 2796 2797static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 2798 unsigned vecl, unsigned vece, 2799 const TCGArg args[TCG_MAX_OP_ARGS], 2800 const int const_args[TCG_MAX_OP_ARGS]) 2801{ 2802 TCGType type = vecl + TCG_TYPE_V64; 2803 unsigned q = vecl; 2804 TCGArg a0, a1, a2, a3; 2805 int cmode, imm8; 2806 2807 a0 = args[0]; 2808 a1 = args[1]; 2809 a2 = args[2]; 2810 2811 switch (opc) { 2812 case INDEX_op_ld_vec: 2813 tcg_out_ld(s, type, a0, a1, a2); 2814 return; 2815 case INDEX_op_st_vec: 2816 tcg_out_st(s, type, a0, a1, a2); 2817 return; 2818 case INDEX_op_dupm_vec: 2819 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2820 return; 2821 case INDEX_op_dup2_vec: 2822 tcg_out_dup2_vec(s, a0, a1, a2); 2823 return; 2824 case INDEX_op_abs_vec: 2825 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1); 2826 return; 2827 case INDEX_op_neg_vec: 2828 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1); 2829 return; 2830 case INDEX_op_not_vec: 2831 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1); 2832 return; 2833 case INDEX_op_add_vec: 2834 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); 2835 return; 2836 case INDEX_op_mul_vec: 2837 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); 2838 return; 2839 case INDEX_op_smax_vec: 2840 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2); 2841 return; 2842 case INDEX_op_smin_vec: 2843 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2); 2844 return; 2845 case INDEX_op_sub_vec: 2846 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); 2847 return; 2848 case INDEX_op_ssadd_vec: 2849 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2); 2850 return; 2851 case INDEX_op_sssub_vec: 2852 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); 2853 return; 2854 case INDEX_op_umax_vec: 2855 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2); 2856 return; 2857 case INDEX_op_umin_vec: 2858 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2); 2859 return; 2860 case INDEX_op_usadd_vec: 2861 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); 2862 return; 2863 case INDEX_op_ussub_vec: 2864 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2); 2865 return; 2866 case INDEX_op_xor_vec: 2867 
tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); 2868 return; 2869 case INDEX_op_arm_sshl_vec: 2870 /* 2871 * Note that Vm is the data and Vn is the shift count, 2872 * therefore the arguments appear reversed. 2873 */ 2874 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1); 2875 return; 2876 case INDEX_op_arm_ushl_vec: 2877 /* See above. */ 2878 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1); 2879 return; 2880 case INDEX_op_shli_vec: 2881 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); 2882 return; 2883 case INDEX_op_shri_vec: 2884 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2); 2885 return; 2886 case INDEX_op_sari_vec: 2887 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); 2888 return; 2889 case INDEX_op_arm_sli_vec: 2890 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece)); 2891 return; 2892 2893 case INDEX_op_andc_vec: 2894 if (!const_args[2]) { 2895 tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2); 2896 return; 2897 } 2898 a2 = ~a2; 2899 /* fall through */ 2900 case INDEX_op_and_vec: 2901 if (const_args[2]) { 2902 is_shimm1632(~a2, &cmode, &imm8); 2903 if (a0 == a1) { 2904 tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */ 2905 return; 2906 } 2907 tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */ 2908 a2 = a0; 2909 } 2910 tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); 2911 return; 2912 2913 case INDEX_op_orc_vec: 2914 if (!const_args[2]) { 2915 tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2); 2916 return; 2917 } 2918 a2 = ~a2; 2919 /* fall through */ 2920 case INDEX_op_or_vec: 2921 if (const_args[2]) { 2922 is_shimm1632(a2, &cmode, &imm8); 2923 if (a0 == a1) { 2924 tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */ 2925 return; 2926 } 2927 tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */ 2928 a2 = a0; 2929 } 2930 tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2); 2931 return; 2932 2933 case INDEX_op_cmp_vec: 2934 { 2935 TCGCond cond = args[3]; 2936 2937 if (cond == TCG_COND_NE) { 2938 if (const_args[2]) { 2939 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1); 2940 } else { 2941 tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2); 2942 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 2943 } 2944 } else { 2945 ARMInsn insn; 2946 2947 if (const_args[2]) { 2948 insn = vec_cmp0_insn[cond]; 2949 if (insn) { 2950 tcg_out_vreg2(s, insn, q, vece, a0, a1); 2951 return; 2952 } 2953 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); 2954 a2 = TCG_VEC_TMP; 2955 } 2956 insn = vec_cmp_insn[cond]; 2957 if (insn == 0) { 2958 TCGArg t; 2959 t = a1, a1 = a2, a2 = t; 2960 cond = tcg_swap_cond(cond); 2961 insn = vec_cmp_insn[cond]; 2962 tcg_debug_assert(insn != 0); 2963 } 2964 tcg_out_vreg3(s, insn, q, vece, a0, a1, a2); 2965 } 2966 } 2967 return; 2968 2969 case INDEX_op_bitsel_vec: 2970 a3 = args[3]; 2971 if (a0 == a3) { 2972 tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1); 2973 } else if (a0 == a2) { 2974 tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1); 2975 } else { 2976 tcg_out_mov(s, type, a0, a1); 2977 tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3); 2978 } 2979 return; 2980 2981 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ 2982 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. 
*/ 2983 default: 2984 g_assert_not_reached(); 2985 } 2986} 2987 2988int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 2989{ 2990 switch (opc) { 2991 case INDEX_op_add_vec: 2992 case INDEX_op_sub_vec: 2993 case INDEX_op_and_vec: 2994 case INDEX_op_andc_vec: 2995 case INDEX_op_or_vec: 2996 case INDEX_op_orc_vec: 2997 case INDEX_op_xor_vec: 2998 case INDEX_op_not_vec: 2999 case INDEX_op_shli_vec: 3000 case INDEX_op_shri_vec: 3001 case INDEX_op_sari_vec: 3002 case INDEX_op_ssadd_vec: 3003 case INDEX_op_sssub_vec: 3004 case INDEX_op_usadd_vec: 3005 case INDEX_op_ussub_vec: 3006 case INDEX_op_bitsel_vec: 3007 return 1; 3008 case INDEX_op_abs_vec: 3009 case INDEX_op_cmp_vec: 3010 case INDEX_op_mul_vec: 3011 case INDEX_op_neg_vec: 3012 case INDEX_op_smax_vec: 3013 case INDEX_op_smin_vec: 3014 case INDEX_op_umax_vec: 3015 case INDEX_op_umin_vec: 3016 return vece < MO_64; 3017 case INDEX_op_shlv_vec: 3018 case INDEX_op_shrv_vec: 3019 case INDEX_op_sarv_vec: 3020 case INDEX_op_rotli_vec: 3021 case INDEX_op_rotlv_vec: 3022 case INDEX_op_rotrv_vec: 3023 return -1; 3024 default: 3025 return 0; 3026 } 3027} 3028 3029void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 3030 TCGArg a0, ...) 3031{ 3032 va_list va; 3033 TCGv_vec v0, v1, v2, t1, t2, c1; 3034 TCGArg a2; 3035 3036 va_start(va, a0); 3037 v0 = temp_tcgv_vec(arg_temp(a0)); 3038 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 3039 a2 = va_arg(va, TCGArg); 3040 va_end(va); 3041 3042 switch (opc) { 3043 case INDEX_op_shlv_vec: 3044 /* 3045 * Merely propagate shlv_vec to arm_ushl_vec. 3046 * In this way we don't set TCG_TARGET_HAS_shv_vec 3047 * because everything is done via expansion. 3048 */ 3049 v2 = temp_tcgv_vec(arg_temp(a2)); 3050 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 3051 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 3052 break; 3053 3054 case INDEX_op_shrv_vec: 3055 case INDEX_op_sarv_vec: 3056 /* Right shifts are negative left shifts for NEON. */ 3057 v2 = temp_tcgv_vec(arg_temp(a2)); 3058 t1 = tcg_temp_new_vec(type); 3059 tcg_gen_neg_vec(vece, t1, v2); 3060 if (opc == INDEX_op_shrv_vec) { 3061 opc = INDEX_op_arm_ushl_vec; 3062 } else { 3063 opc = INDEX_op_arm_sshl_vec; 3064 } 3065 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0), 3066 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3067 tcg_temp_free_vec(t1); 3068 break; 3069 3070 case INDEX_op_rotli_vec: 3071 t1 = tcg_temp_new_vec(type); 3072 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); 3073 vec_gen_4(INDEX_op_arm_sli_vec, type, vece, 3074 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); 3075 tcg_temp_free_vec(t1); 3076 break; 3077 3078 case INDEX_op_rotlv_vec: 3079 v2 = temp_tcgv_vec(arg_temp(a2)); 3080 t1 = tcg_temp_new_vec(type); 3081 c1 = tcg_constant_vec(type, vece, 8 << vece); 3082 tcg_gen_sub_vec(vece, t1, v2, c1); 3083 /* Right shifts are negative left shifts for NEON. */ 3084 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 3085 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3086 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 3087 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 3088 tcg_gen_or_vec(vece, v0, v0, t1); 3089 tcg_temp_free_vec(t1); 3090 break; 3091 3092 case INDEX_op_rotrv_vec: 3093 v2 = temp_tcgv_vec(arg_temp(a2)); 3094 t1 = tcg_temp_new_vec(type); 3095 t2 = tcg_temp_new_vec(type); 3096 c1 = tcg_constant_vec(type, vece, 8 << vece); 3097 tcg_gen_neg_vec(vece, t1, v2); 3098 tcg_gen_sub_vec(vece, t2, c1, v2); 3099 /* Right shifts are negative left shifts for NEON. 
*/ 3100 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 3101 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 3102 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2), 3103 tcgv_vec_arg(v1), tcgv_vec_arg(t2)); 3104 tcg_gen_or_vec(vece, v0, t1, t2); 3105 tcg_temp_free_vec(t1); 3106 tcg_temp_free_vec(t2); 3107 break; 3108 3109 default: 3110 g_assert_not_reached(); 3111 } 3112} 3113 3114static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 3115{ 3116 int i; 3117 for (i = 0; i < count; ++i) { 3118 p[i] = INSN_NOP; 3119 } 3120} 3121 3122/* Compute frame size via macros, to share between tcg_target_qemu_prologue 3123 and tcg_register_jit. */ 3124 3125#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long)) 3126 3127#define FRAME_SIZE \ 3128 ((PUSH_SIZE \ 3129 + TCG_STATIC_CALL_ARGS_SIZE \ 3130 + CPU_TEMP_BUF_NLONGS * sizeof(long) \ 3131 + TCG_TARGET_STACK_ALIGN - 1) \ 3132 & -TCG_TARGET_STACK_ALIGN) 3133 3134#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) 3135 3136static void tcg_target_qemu_prologue(TCGContext *s) 3137{ 3138 /* Calling convention requires us to save r4-r11 and lr. */ 3139 /* stmdb sp!, { r4 - r11, lr } */ 3140 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK, 3141 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3142 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3143 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14)); 3144 3145 /* Reserve callee argument and tcg temp space. */ 3146 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, 3147 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3148 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 3149 CPU_TEMP_BUF_NLONGS * sizeof(long)); 3150 3151 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 3152 3153#ifndef CONFIG_SOFTMMU 3154 if (guest_base) { 3155 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); 3156 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); 3157 } 3158#endif 3159 3160 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); 3161 3162 /* 3163 * Return path for goto_ptr. Set return value to 0, a-la exit_tb, 3164 * and fall through to the rest of the epilogue. 3165 */ 3166 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 3167 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); 3168 tcg_out_epilogue(s); 3169} 3170 3171static void tcg_out_epilogue(TCGContext *s) 3172{ 3173 /* Release local stack frame. */ 3174 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, 3175 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 3176 3177 /* ldmia sp!, { r4 - r11, pc } */ 3178 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK, 3179 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 3180 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 3181 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC)); 3182} 3183 3184typedef struct { 3185 DebugFrameHeader h; 3186 uint8_t fde_def_cfa[4]; 3187 uint8_t fde_reg_ofs[18]; 3188} DebugFrame; 3189 3190#define ELF_HOST_MACHINE EM_ARM 3191 3192/* We're expecting a 2 byte uleb128 encoded value. */ 3193QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); 3194 3195static const DebugFrame debug_frame = { 3196 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 3197 .h.cie.id = -1, 3198 .h.cie.version = 1, 3199 .h.cie.code_align = 1, 3200 .h.cie.data_align = 0x7c, /* sleb128 -4 */ 3201 .h.cie.return_column = 14, 3202 3203 /* Total FDE size does not include the "len" member. 
*/ 3204 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 3205 3206 .fde_def_cfa = { 3207 12, 13, /* DW_CFA_def_cfa sp, ... */ 3208 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 3209 (FRAME_SIZE >> 7) 3210 }, 3211 .fde_reg_ofs = { 3212 /* The following must match the stmdb in the prologue. */ 3213 0x8e, 1, /* DW_CFA_offset, lr, -4 */ 3214 0x8b, 2, /* DW_CFA_offset, r11, -8 */ 3215 0x8a, 3, /* DW_CFA_offset, r10, -12 */ 3216 0x89, 4, /* DW_CFA_offset, r9, -16 */ 3217 0x88, 5, /* DW_CFA_offset, r8, -20 */ 3218 0x87, 6, /* DW_CFA_offset, r7, -24 */ 3219 0x86, 7, /* DW_CFA_offset, r6, -28 */ 3220 0x85, 8, /* DW_CFA_offset, r5, -32 */ 3221 0x84, 9, /* DW_CFA_offset, r4, -36 */ 3222 } 3223}; 3224 3225void tcg_register_jit(const void *buf, size_t buf_size) 3226{ 3227 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 3228} 3229