/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped.  */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};
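
/*
 * A note on the ordering above: the call-saved core registers r4-r11
 * are preferred so that values allocated to them survive calls out to
 * helpers, while the call-clobbered r0-r3, r12 and r14 are tried last.
 * On the vector side only call-clobbered registers are listed at all,
 * per the comment above on Q4-Q7.
 */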

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
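
/*
 * These macros build the shifter-operand field of a data-processing
 * instruction: an immediate shift amount occupies bits 11:7, a
 * shift-by-register places Rs in bits 11:8, bits 6:5 select the shift
 * type, and bit 4 distinguishes the register form.  In particular
 * SHIFT_IMM_LSL(0) is 0, i.e. a plain unshifted register operand,
 * which is what most callers below pass.
 */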

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0, r0.  */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
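
/*
 * The ARITH_* values above occupy bits 24:21 of a data-processing
 * instruction; the emitters below OR in the condition, registers and
 * shifter operand.  As a worked example, "adds r0, r1, r2" assembles to
 * (COND_AL << 28) | ARITH_ADD | TO_CPSR | (1 << 16) | (0 << 12) | 2
 * == 0xe0910002.
 */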

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}
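
/*
 * All of the pc-relative computations above subtract 8 because an ARM
 * instruction reads PC as its own address plus 8.  For example, a B at
 * address 0x1000 targeting 0x1008 encodes offset
 * (0x1008 - 0x1000 - 8) >> 2 == 0, and a branch to its own address
 * encodes -2.
 */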

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only)
 * and r0-r1 doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded.  */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
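
/*
 * Worked example: imm = 0x00ff0000.  ctz32 gives a shift of 16, so
 * imm8 = 0xff and the stored rotation is 32 - 16 = 16; the returned
 * imm12 is (16 << 7) | 0xff == 0x8ff, i.e. rotate field 8, meaning
 * 0xff rotated right by 2 * 8 bits.
 */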

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                 (((offset - 8) >> 2) & 0x00ffffff));
}
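
/*
 * Note that BLX (immediate) is unconditional and always switches to
 * Thumb state, which is why there is no cond parameter above: bit 24
 * (the H bit, built from bit 1 of the offset) supplies the halfword
 * part of the target address.
 */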
595 */ 596 tcg_out_bx_reg(s, cond, rn); 597} 598 599static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, 600 TCGReg rd, TCGReg rn, int im) 601{ 602 tcg_out32(s, (cond << 28) | (1 << 25) | opc | 603 (rn << 16) | (rd << 12) | im); 604} 605 606static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, 607 TCGReg rn, uint16_t mask) 608{ 609 tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); 610} 611 612/* Note that this routine is used for both LDR and LDRH formats, so we do 613 not wish to include an immediate shift at this point. */ 614static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 615 TCGReg rn, TCGReg rm, bool u, bool p, bool w) 616{ 617 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) 618 | (w << 21) | (rn << 16) | (rt << 12) | rm); 619} 620 621static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 622 TCGReg rn, int imm8, bool p, bool w) 623{ 624 bool u = 1; 625 if (imm8 < 0) { 626 imm8 = -imm8; 627 u = 0; 628 } 629 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 630 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); 631} 632 633static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, 634 TCGReg rt, TCGReg rn, int imm12, bool p, bool w) 635{ 636 bool u = 1; 637 if (imm12 < 0) { 638 imm12 = -imm12; 639 u = 0; 640 } 641 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 642 (rn << 16) | (rt << 12) | imm12); 643} 644 645static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, 646 TCGReg rn, int imm12) 647{ 648 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); 649} 650 651static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, 652 TCGReg rn, int imm12) 653{ 654 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); 655} 656 657static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, 658 TCGReg rn, TCGReg rm) 659{ 660 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); 661} 662 663static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, 664 TCGReg rn, TCGReg rm) 665{ 666 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); 667} 668 669static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, 670 TCGReg rn, int imm8) 671{ 672 tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); 673} 674 675static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, 676 TCGReg rn, TCGReg rm) 677{ 678 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); 679} 680 681static void __attribute__((unused)) 682tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) 683{ 684 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); 685} 686 687static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, 688 TCGReg rn, int imm8) 689{ 690 tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); 691} 692 693static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, 694 TCGReg rn, TCGReg rm) 695{ 696 tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); 697} 698 699/* Register pre-increment with base writeback. 

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}
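
/*
 * The pc-relative load above is emitted with a zero offset:
 * new_pool_label records the constant together with an R_ARM_PC13
 * relocation against this insn, and the load is patched via reloc_pc13
 * once the constant pool is flushed after the TB, so the pool must end
 * up within +-0xfff bytes of the load.
 */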

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
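
/*
 * Example of the two-insn path above, for arg = 0x00ff00ff on a pre-v7
 * host: sh1 = 0 and sh2 = 16 leave tt2 == 0, so we emit
 *     mov rd, #0xff
 *     eor rd, rd, #0x00ff0000
 */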

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIN" constraint.
 */
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
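
/*
 * Example for the rIK/rIN fallbacks above: with (opc, opneg) = (ADD, SUB)
 * and rhs = -1, encode_imm(-1) fails but encode_imm(1) succeeds, so
 * "add rd, rn, #-1" is emitted as "sub rd, rn, #1"; similarly
 * (AND, BIC) with rhs = ~0xff becomes "bic rd, rn, #0xff".
 */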

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
}

static void __attribute__((unused))
tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t.  */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
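
/*
 * Worked example for the equality/unsigned cases above: a 64-bit LTU
 * emits "cmp ah, bh; cmpeq al, bl".  If the high halves differ, the
 * first CMP decides; if they are equal, the conditional CMP replaces
 * the flags with the low-half comparison, so a single COND_CC test
 * covers the full 64-bit unsigned less-than.
 */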

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
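
/*
 * Example: TCG_REG_Q8 has low register bits 0b1000, so encode_vd sets
 * D (bit 22) and leaves Vd (bits 15:12) zero -- i.e. D16, the low half
 * of Q8.  encode_vn and encode_vm do the same for the N:Vn (bits 7,
 * 19:16) and M:Vm (bits 5, 3:0) fields.
 */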
1341 * 1342 * We provide routines for arguments which are: immediate, 32 bit 1343 * value in register, 16 and 8 bit values in register (which must be zero 1344 * extended before use) and 64 bit value in a lo:hi register pair. 1345 */ 1346#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \ 1347static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \ 1348{ \ 1349 if (argreg < 4) { \ 1350 MOV_ARG(s, COND_AL, argreg, arg); \ 1351 } else { \ 1352 int ofs = (argreg - 4) * 4; \ 1353 EXT_ARG; \ 1354 tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \ 1355 tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \ 1356 } \ 1357 return argreg + 1; \ 1358} 1359 1360DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32, 1361 (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) 1362DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u, 1363 (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) 1364DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u, 1365 (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) 1366DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, ) 1367 1368static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, 1369 TCGReg arglo, TCGReg arghi) 1370{ 1371 /* 64 bit arguments must go in even/odd register pairs 1372 * and in 8-aligned stack slots. 1373 */ 1374 if (argreg & 1) { 1375 argreg++; 1376 } 1377 if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) { 1378 tcg_out_strd_8(s, COND_AL, arglo, 1379 TCG_REG_CALL_STACK, (argreg - 4) * 4); 1380 return argreg + 2; 1381 } else { 1382 argreg = tcg_out_arg_reg32(s, argreg, arglo); 1383 argreg = tcg_out_arg_reg32(s, argreg, arghi); 1384 return argreg; 1385 } 1386} 1387 1388#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) 1389 1390/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ 1391QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1392QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); 1393 1394/* These offsets are built into the LDRD below. */ 1395QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1396QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); 1397 1398/* Load and compare a TLB entry, leaving the flags set. Returns the register 1399 containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ 1400 1401static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, 1402 MemOp opc, int mem_index, bool is_load) 1403{ 1404 int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) 1405 : offsetof(CPUTLBEntry, addr_write)); 1406 int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1407 unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; 1408 unsigned a_mask = (1 << get_alignment_bits(opc)) - 1; 1409 TCGReg t_addr; 1410 1411 /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ 1412 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); 1413 1414 /* Extract the tlb index from the address into R0. */ 1415 tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, 1416 SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); 1417 1418 /* 1419 * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1420 * Load the tlb comparator into R2/R3 and the fast path addend into R1. 

#define TLB_SHIFT  (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
    TCGReg t_addr;

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in 2-4 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory operation,
     * which means it isn't worth checking for an immediate operand for BIC.
     *
     * For unaligned accesses, test the page of the last unit of alignment.
     * This leaves the least significant alignment bits unchanged, and of
     * course must be zero.
     */
    t_addr = addrlo;
    if (a_mask < s_mask) {
        t_addr = TCG_REG_R0;
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                        addrlo, s_mask - a_mask);
    }
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        t_addr, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_mask) {
            tcg_debug_assert(a_mask <= 0xff);
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}
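
/*
 * As a rough sketch, for a 32-bit guest on a v7 host the fast path
 * above comes out as:
 *     ldrd   r0, r1, [env, #fast_off]    @ {mask, table}
 *     and    r0, r0, addrlo, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *     ldr    r2, [r1, r0]!               @ comparator; r1 = &entry
 *     ldr    r1, [r1, #addend_off]
 *     movw+movt tmp, #~(TARGET_PAGE_MASK | a_mask)
 *     bic    tmp, t_addr, tmp
 *     cmp    r2, tmp
 * leaving EQ set exactly for an aligned hit.
 */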

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Use the canonical unsigned helpers and minimize icache usage.  */
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_UQ:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
                                   TCGReg addrhi, unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;

    /* We are expecting a_bits to max out at 7, and can easily support 8.  */
    tcg_debug_assert(a_mask <= 0xff);
    /* tst addr, #mask */
    tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);

    /* blne slow_path */
    label->label_ptr[0] = s->code_ptr;
    tcg_out_bl_imm(s, COND_NE, 0);

    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    if (TARGET_LONG_BITS == 64) {
        /* 64-bit target address is aligned into R2:R3.  */
        if (l->addrhi_reg != TCG_REG_R2) {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
        } else if (l->addrlo_reg != TCG_REG_R3) {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
        }
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
    }
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0);

    /*
     * Tail call to the helper, with the return address back inline,
     * just for the clarity of the debugging traceback -- the helper
     * cannot return.  We have used BLNE to arrive here, so LR is
     * already set.
     */
    tcg_out_goto(s, COND_AL, (const void *)
                 (l->is_ld ? helper_unaligned_ld : helper_unaligned_st));
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
#endif /* SOFTMMU */

static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                  TCGReg datalo, TCGReg datahi,
                                  TCGReg addrlo, TCGReg addend,
                                  bool scratch_addend)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SW:
        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UL:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UQ:
        /* LDRD requires alignment; double-check that.  */
        if (get_alignment_bits(opc) >= MO_64
            && (datalo & 1) == 0 && datahi == datalo + 1) {
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((addend & ~1) != datalo) {
                tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
                break;
            }
            if ((addrlo & ~1) != datalo) {
                tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
                break;
            }
        }
        if (scratch_addend) {
            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                            addend, addrlo, SHIFT_IMM_LSL(0));
            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_SOFTMMU
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, TCGReg addrlo)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SW:
        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UL:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UQ:
        /* LDRD requires alignment; double-check that.  */
        if (get_alignment_bits(opc) >= MO_64
            && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
        } else if (datalo == addrlo) {
            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
#endif

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    MemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#else
    unsigned a_bits;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this
       opcode into LR for the slow path.  We will not be using the value
       for a tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl_imm(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
    }
    if (guest_base) {
        tcg_out_qemu_ld_index(s, opc, datalo, datahi,
                              addrlo, TCG_REG_GUEST_BASE, false);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
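
/*
 * Placement note: here the conditional BL precedes the access and exists
 * only to set LR for the slow path, while tcg_out_qemu_st below emits
 * the store under COND_EQ and puts its BLNE last, so that the helper
 * returns directly to the code following the opcode.
 */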

static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
                                  TCGReg datalo, TCGReg datahi,
                                  TCGReg addrlo, TCGReg addend,
                                  bool scratch_addend)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_32:
        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_64:
        /* STRD requires alignment; double-check that.  */
        if (get_alignment_bits(opc) >= MO_64
            && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else if (scratch_addend) {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        } else {
            tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
                            addend, addrlo, SHIFT_IMM_LSL(0));
            tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
            tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_SOFTMMU
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, TCGReg addrlo)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_32:
        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_64:
        /* STRD requires alignment; double-check that.  */
        if (get_alignment_bits(opc) >= MO_64
            && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
#endif
static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
                                  TCGReg datalo, TCGReg datahi,
                                  TCGReg addrlo, TCGReg addend,
                                  bool scratch_addend)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_32:
        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_64:
        /* STRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64
            && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else if (scratch_addend) {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        } else {
            tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
                            addend, addrlo, SHIFT_IMM_LSL(0));
            tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
            tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_SOFTMMU
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, TCGReg addrlo)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_32:
        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_64:
        /* STRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64
            && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
#endif

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    MemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#else
    unsigned a_bits;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
                          addrlo, addend, true);

    /* The conditional call must come last, as we're going to return here. */
    label_ptr = s->code_ptr;
    tcg_out_bl_imm(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
    }
    if (guest_base) {
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
                              addrlo, TCG_REG_GUEST_BASE, false);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static void tcg_out_epilogue(TCGContext *s);

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
        tcg_out_epilogue(s);
        break;
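    /*
     * A worked example for the goto_tb displacement handling below:
     * with dif == 0x1234, sextract32(dif, 0, 12) yields 0x234 != dif,
     * so the base becomes r0 = ptr - 0x234 and the jump is emitted as
     * ldr pc, [r0, #0x234].  The -8 in the computation accounts for
     * the ARM convention that pc reads as the insn address plus 8.
     */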
    case INDEX_op_goto_tb:
        {
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
            dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
            dil = sextract32(dif, 0, 12);
            if (dif != dil) {
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   be no worse off.  */
                base = TCG_REG_R0;
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            }
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_b_reg(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
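    /*
     * In the A32 immediate-shift encoding, an immediate of 0 means a
     * shift of 32 for LSR/ASR and RRX for ROR, so a shift count of
     * zero below must be emitted as LSL #0 (a plain move) instead.
     */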
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;
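
    /* ctz is synthesized via the identity ctz32(x) == clz32(rbit(x)). */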
    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic. */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        tcg_abort();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, r, rIN);

    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return C_O1_I2(r, r, rIK);

    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return C_O1_I2(r, r, r);

    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return C_O1_I2(r, r, rI);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
        return C_O0_I2(r, rIN);
    case INDEX_op_deposit_i32:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_extract2_i32:
        return C_O1_I2(r, rZ, rZ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, r, rIN, rIK, 0);
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rIN, rIK);
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rI, rIN, rIK);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, rI, rI);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, rI, rI);
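
    /*
     * The qemu_ld/st operand counts below vary with both the guest
     * address size and the data size: a 64-bit guest address or a
     * 64-bit data value each occupy a register pair, so e.g.
     * qemu_st_i64 with TARGET_LONG_BITS == 64 takes four inputs.
     */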
    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_dup2_vec:
    case INDEX_op_add_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_arm_sshl_vec:
    case INDEX_op_arm_ushl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_arm_sli_vec:
        return C_O1_I2(w, 0, w);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wV);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);
    default:
        g_assert_not_reached();
    }
}
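
/*
 * A note on the constraint codes above: C_Ox_Iy(...) lists x outputs
 * followed by y inputs.  "rI" allows an encodable immediate operand,
 * while "rIN" and "rIK" additionally allow immediates whose negation
 * or complement is encodable; the matching tcg_out_dat_rIN/rIK calls
 * then select between instruction pairs such as ADD/SUB or AND/BIC.
 */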
2400 */ 2401#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2402 { 2403 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2404#ifndef use_idiv_instructions 2405 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2406#endif 2407#ifndef use_neon_instructions 2408 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2409#endif 2410 } 2411#endif 2412 2413 if (__ARM_ARCH < 7) { 2414 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2415 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2416 arm_arch = pl[1] - '0'; 2417 } 2418 2419 if (arm_arch < 6) { 2420 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2421 exit(EXIT_FAILURE); 2422 } 2423 } 2424 2425 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2426 2427 tcg_target_call_clobber_regs = 0; 2428 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2429 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2430 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2431 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2432 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2433 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2434 2435 if (use_neon_instructions) { 2436 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2437 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2438 2439 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2440 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2441 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2442 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2443 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2444 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2445 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2446 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2447 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2448 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2449 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2450 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2451 } 2452 2453 s->reserved_regs = 0; 2454 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2455 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2456 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2457 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2458} 2459 2460static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2461 TCGReg arg1, intptr_t arg2) 2462{ 2463 switch (type) { 2464 case TCG_TYPE_I32: 2465 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2466 return; 2467 case TCG_TYPE_V64: 2468 /* regs 1; size 8; align 8 */ 2469 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2470 return; 2471 case TCG_TYPE_V128: 2472 /* 2473 * We have only 8-byte alignment for the stack per the ABI. 2474 * Rather than dynamically re-align the stack, it's easier 2475 * to simply not request alignment beyond that. 
    if (use_neon_instructions) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;

        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /*
         * We have only 8-byte alignment for the stack per the ABI.
         * Rather than dynamically re-align the stack, it's easier
         * to simply not request alignment beyond that.  So:
         * regs 2; size 8; align 8
         */
        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(type == TCG_TYPE_I32);
    tcg_debug_assert(ret < TCG_REG_Q0);
    tcg_out_movi32(s, COND_AL, ret, arg);
}

/* Type is always V128, with I64 elements. */
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
{
    /* Move high element into place first. */
    /* VMOV Dd+1, Ds */
    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
    /* Move low element into place; tcg_out_mov will check for nop. */
    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int q = type - TCG_TYPE_V64;

    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
        int b = (vece == MO_8);
        int e = (vece == MO_16);
        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
                  encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg base, intptr_t offset)
{
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}
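
/*
 * A worked example for the 0x00/0xff byte-mask test in
 * tcg_out_dupi_vec: for v64 == 0xff00ff00ff00ff00 the loop sets bit i
 * of imm8 for each 0xff byte, giving imm8 == 0xaa; the op=1, cmode=0xe
 * move-immediate form then expands imm8 back into the 64-bit mask.
 */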
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;

    /* Test all bytes equal first. */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }

    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }

    /* As a last resort, load from the constant pool. */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}
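
/*
 * Only the conditions NEON implements directly appear in these tables.
 * tcg_out_vec_op synthesizes the rest: NE as VCEQ plus VMVN (or VTST
 * of a register with itself when comparing against zero), and the
 * missing register-register LT/LE/LTU/LEU by swapping the operands of
 * VCGT/VCGE via tcg_swap_cond().
 */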
static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};

static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        return;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        return;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        return;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        return;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        return;
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        return;
    case INDEX_op_arm_ushl_vec:
        /* See above. */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        return;
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        return;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        return;
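
    /*
     * NEON has no AND-with-immediate, but VBIC can apply the inverted
     * mask, which is why the constant paths below hand ~a2 to
     * is_shimm1632() and emit VBICI, or VMVNI followed by VAND.
     */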
    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        return;

    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        return;

    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (cond == TCG_COND_NE) {
                if (const_args[2]) {
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
            } else {
                ARMInsn insn;

                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        return;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
            }
        }
        return;

    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}
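
/*
 * Return values follow the usual TCG convention: 1 means the op is
 * supported as-is, -1 means it is supported via the expansion in
 * tcg_expand_vec_op below, and 0 means it is unsupported.
 */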
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;

    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotli_vec:
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;
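
    /*
     * rotr(x, n) == (x >> n) | (x << (width - n)).  Both pieces are
     * VSHLs: one by the negated count and one by (width - n), ORed
     * together at the end.
     */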
    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE)

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));

    /* Reserve callee argument and tcg temp space. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }
#endif

    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
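
/*
 * A worked example of the two-byte uleb128 emitted in fde_def_cfa:
 * if FRAME_SIZE were 0x1a0, the bytes would be (0x1a0 & 0x7f) | 0x80
 * == 0xa0 and 0x1a0 >> 7 == 0x03, decoding to 0x20 + 0x03 * 128 ==
 * 0x1a0.  The actual FRAME_SIZE depends on configuration; 0x1a0 is
 * purely illustrative.
 */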
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue. */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}