1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Andrzej Zaborowski 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25#include "elf.h" 26#include "../tcg-ldst.c.inc" 27#include "../tcg-pool.c.inc" 28 29int arm_arch = __ARM_ARCH; 30 31#ifndef use_idiv_instructions 32bool use_idiv_instructions; 33#endif 34#ifndef use_neon_instructions 35bool use_neon_instructions; 36#endif 37 38#ifdef CONFIG_DEBUG_TCG 39static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 40 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 41 "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc", 42 "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7", 43 "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15", 44}; 45#endif 46 47static const int tcg_target_reg_alloc_order[] = { 48 TCG_REG_R4, 49 TCG_REG_R5, 50 TCG_REG_R6, 51 TCG_REG_R7, 52 TCG_REG_R8, 53 TCG_REG_R9, 54 TCG_REG_R10, 55 TCG_REG_R11, 56 TCG_REG_R13, 57 TCG_REG_R0, 58 TCG_REG_R1, 59 TCG_REG_R2, 60 TCG_REG_R3, 61 TCG_REG_R12, 62 TCG_REG_R14, 63 64 TCG_REG_Q0, 65 TCG_REG_Q1, 66 TCG_REG_Q2, 67 TCG_REG_Q3, 68 /* Q4 - Q7 are call-saved, and skipped. */ 69 TCG_REG_Q8, 70 TCG_REG_Q9, 71 TCG_REG_Q10, 72 TCG_REG_Q11, 73 TCG_REG_Q12, 74 TCG_REG_Q13, 75 TCG_REG_Q14, 76 TCG_REG_Q15, 77}; 78 79static const int tcg_target_call_iarg_regs[4] = { 80 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 81}; 82 83static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 84{ 85 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 86 tcg_debug_assert(slot >= 0 && slot <= 3); 87 return TCG_REG_R0 + slot; 88} 89 90#define TCG_REG_TMP TCG_REG_R12 91#define TCG_VEC_TMP TCG_REG_Q15 92#define TCG_REG_GUEST_BASE TCG_REG_R11 93 94typedef enum { 95 COND_EQ = 0x0, 96 COND_NE = 0x1, 97 COND_CS = 0x2, /* Unsigned greater or equal */ 98 COND_CC = 0x3, /* Unsigned less than */ 99 COND_MI = 0x4, /* Negative */ 100 COND_PL = 0x5, /* Zero or greater */ 101 COND_VS = 0x6, /* Overflow */ 102 COND_VC = 0x7, /* No overflow */ 103 COND_HI = 0x8, /* Unsigned greater than */ 104 COND_LS = 0x9, /* Unsigned less or equal */ 105 COND_GE = 0xa, 106 COND_LT = 0xb, 107 COND_GT = 0xc, 108 COND_LE = 0xd, 109 COND_AL = 0xe, 110} ARMCond; 111 112#define TO_CPSR (1 << 20) 113 114#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) 115#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) 116#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) 117#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) 118#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) 119#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) 120#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) 121#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) 122 123typedef enum { 124 ARITH_AND = 0x0 << 21, 125 ARITH_EOR = 0x1 << 21, 126 ARITH_SUB = 0x2 << 21, 127 ARITH_RSB = 0x3 << 21, 128 ARITH_ADD = 0x4 << 21, 129 ARITH_ADC = 0x5 << 21, 130 ARITH_SBC = 0x6 << 21, 131 ARITH_RSC = 0x7 << 21, 132 ARITH_TST = 0x8 << 21 | TO_CPSR, 133 ARITH_CMP = 0xa << 21 | TO_CPSR, 134 ARITH_CMN = 0xb << 21 | TO_CPSR, 135 ARITH_ORR = 0xc << 21, 136 ARITH_MOV = 0xd << 21, 137 ARITH_BIC = 0xe << 21, 138 ARITH_MVN = 0xf << 21, 139 140 INSN_B = 0x0a000000, 141 142 INSN_CLZ = 0x016f0f10, 143 INSN_RBIT = 0x06ff0f30, 144 145 INSN_LDMIA = 0x08b00000, 146 INSN_STMDB = 0x09200000, 147 148 INSN_LDR_IMM = 0x04100000, 149 INSN_LDR_REG = 0x06100000, 150 INSN_STR_IMM = 0x04000000, 151 INSN_STR_REG = 0x06000000, 152 153 INSN_LDRH_IMM = 0x005000b0, 154 INSN_LDRH_REG = 0x001000b0, 155 INSN_LDRSH_IMM = 0x005000f0, 156 INSN_LDRSH_REG = 0x001000f0, 157 INSN_STRH_IMM = 0x004000b0, 158 INSN_STRH_REG = 0x000000b0, 159 160 INSN_LDRB_IMM = 0x04500000, 161 INSN_LDRB_REG = 0x06500000, 162 INSN_LDRSB_IMM = 0x005000d0, 163 INSN_LDRSB_REG = 0x001000d0, 164 INSN_STRB_IMM = 0x04400000, 165 INSN_STRB_REG = 0x06400000, 166 167 INSN_LDRD_IMM = 0x004000d0, 168 INSN_LDRD_REG = 0x000000d0, 169 INSN_STRD_IMM = 0x004000f0, 170 INSN_STRD_REG = 0x000000f0, 171 172 INSN_DMB_ISH = 0xf57ff05b, 173 INSN_DMB_MCR = 0xee070fba, 174 175 /* Architected nop introduced in v6k. */ 176 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this 177 also Just So Happened to do nothing on pre-v6k so that we 178 don't need to conditionalize it? */ 179 INSN_NOP_v6k = 0xe320f000, 180 /* Otherwise the assembler uses mov r0,r0 */ 181 INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, 182 183 INSN_VADD = 0xf2000800, 184 INSN_VAND = 0xf2000110, 185 INSN_VBIC = 0xf2100110, 186 INSN_VEOR = 0xf3000110, 187 INSN_VORN = 0xf2300110, 188 INSN_VORR = 0xf2200110, 189 INSN_VSUB = 0xf3000800, 190 INSN_VMUL = 0xf2000910, 191 INSN_VQADD = 0xf2000010, 192 INSN_VQADD_U = 0xf3000010, 193 INSN_VQSUB = 0xf2000210, 194 INSN_VQSUB_U = 0xf3000210, 195 INSN_VMAX = 0xf2000600, 196 INSN_VMAX_U = 0xf3000600, 197 INSN_VMIN = 0xf2000610, 198 INSN_VMIN_U = 0xf3000610, 199 200 INSN_VABS = 0xf3b10300, 201 INSN_VMVN = 0xf3b00580, 202 INSN_VNEG = 0xf3b10380, 203 204 INSN_VCEQ0 = 0xf3b10100, 205 INSN_VCGT0 = 0xf3b10000, 206 INSN_VCGE0 = 0xf3b10080, 207 INSN_VCLE0 = 0xf3b10180, 208 INSN_VCLT0 = 0xf3b10200, 209 210 INSN_VCEQ = 0xf3000810, 211 INSN_VCGE = 0xf2000310, 212 INSN_VCGT = 0xf2000300, 213 INSN_VCGE_U = 0xf3000310, 214 INSN_VCGT_U = 0xf3000300, 215 216 INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ 217 INSN_VSARI = 0xf2800010, /* VSHR.S */ 218 INSN_VSHRI = 0xf3800010, /* VSHR.U */ 219 INSN_VSLI = 0xf3800510, 220 INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ 221 INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ 222 223 INSN_VBSL = 0xf3100110, 224 INSN_VBIT = 0xf3200110, 225 INSN_VBIF = 0xf3300110, 226 227 INSN_VTST = 0xf2000810, 228 229 INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ 230 INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ 231 INSN_VLDR_D = 0xed100b00, /* VLDR.64 */ 232 INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ 233 INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */ 234 INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ 235 INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */ 236} ARMInsn; 237 238#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) 239 240static const uint8_t tcg_cond_to_arm_cond[] = { 241 [TCG_COND_EQ] = COND_EQ, 242 [TCG_COND_NE] = COND_NE, 243 [TCG_COND_LT] = COND_LT, 244 [TCG_COND_GE] = COND_GE, 245 [TCG_COND_LE] = COND_LE, 246 [TCG_COND_GT] = COND_GT, 247 /* unsigned */ 248 [TCG_COND_LTU] = COND_CC, 249 [TCG_COND_GEU] = COND_CS, 250 [TCG_COND_LEU] = COND_LS, 251 [TCG_COND_GTU] = COND_HI, 252}; 253 254static int encode_imm(uint32_t imm); 255 256/* TCG private relocation type: add with pc+imm8 */ 257#define R_ARM_PC8 11 258 259/* TCG private relocation type: vldr with imm8 << 2 */ 260#define R_ARM_PC11 12 261 262static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 263{ 264 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 265 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2; 266 267 if (offset == sextract32(offset, 0, 24)) { 268 *src_rw = deposit32(*src_rw, 0, 24, offset); 269 return true; 270 } 271 return false; 272} 273 274static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 275{ 276 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 277 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 278 279 if (offset >= -0xfff && offset <= 0xfff) { 280 tcg_insn_unit insn = *src_rw; 281 bool u = (offset >= 0); 282 if (!u) { 283 offset = -offset; 284 } 285 insn = deposit32(insn, 23, 1, u); 286 insn = deposit32(insn, 0, 12, offset); 287 *src_rw = insn; 288 return true; 289 } 290 return false; 291} 292 293static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 294{ 295 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 296 ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4; 297 298 if (offset >= -0xff && offset <= 0xff) { 299 tcg_insn_unit insn = *src_rw; 300 bool u = (offset >= 0); 301 if (!u) { 302 offset = -offset; 303 } 304 insn = deposit32(insn, 23, 1, u); 305 insn = deposit32(insn, 0, 8, offset); 306 *src_rw = insn; 307 return true; 308 } 309 return false; 310} 311 312static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 313{ 314 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 315 ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; 316 int imm12 = encode_imm(offset); 317 318 if (imm12 >= 0) { 319 *src_rw = deposit32(*src_rw, 0, 12, imm12); 320 return true; 321 } 322 return false; 323} 324 325static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 326 intptr_t value, intptr_t addend) 327{ 328 tcg_debug_assert(addend == 0); 329 switch (type) { 330 case R_ARM_PC24: 331 return reloc_pc24(code_ptr, (const tcg_insn_unit *)value); 332 case R_ARM_PC13: 333 return reloc_pc13(code_ptr, (const tcg_insn_unit *)value); 334 case R_ARM_PC11: 335 return reloc_pc11(code_ptr, (const tcg_insn_unit *)value); 336 case R_ARM_PC8: 337 return reloc_pc8(code_ptr, (const tcg_insn_unit *)value); 338 default: 339 g_assert_not_reached(); 340 } 341} 342 343#define TCG_CT_CONST_ARM 0x100 344#define TCG_CT_CONST_INV 0x200 345#define TCG_CT_CONST_NEG 0x400 346#define TCG_CT_CONST_ZERO 0x800 347#define TCG_CT_CONST_ORRI 0x1000 348#define TCG_CT_CONST_ANDI 0x2000 349 350#define ALL_GENERAL_REGS 0xffffu 351#define ALL_VECTOR_REGS 0xffff0000u 352 353/* 354 * r0-r3 will be overwritten when reading the tlb entry (system-mode only); 355 * r14 will be overwritten by the BLNE branching to the slow path. 356 */ 357#define ALL_QLDST_REGS \ 358 (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14))) 359 360/* 361 * ARM immediates for ALU instructions are made of an unsigned 8-bit 362 * right-rotated by an even amount between 0 and 30. 363 * 364 * Return < 0 if @imm cannot be encoded, else the entire imm12 field. 365 */ 366static int encode_imm(uint32_t imm) 367{ 368 uint32_t rot, imm8; 369 370 /* Simple case, no rotation required. */ 371 if ((imm & ~0xff) == 0) { 372 return imm; 373 } 374 375 /* Next, try a simple even shift. */ 376 rot = ctz32(imm) & ~1; 377 imm8 = imm >> rot; 378 rot = 32 - rot; 379 if ((imm8 & ~0xff) == 0) { 380 goto found; 381 } 382 383 /* 384 * Finally, try harder with rotations. 385 * The ctz test above will have taken care of rotates >= 8. 386 */ 387 for (rot = 2; rot < 8; rot += 2) { 388 imm8 = rol32(imm, rot); 389 if ((imm8 & ~0xff) == 0) { 390 goto found; 391 } 392 } 393 /* Fail: imm cannot be encoded. */ 394 return -1; 395 396 found: 397 /* Note that rot is even, and we discard bit 0 by shifting by 7. */ 398 return rot << 7 | imm8; 399} 400 401static int encode_imm_nofail(uint32_t imm) 402{ 403 int ret = encode_imm(imm); 404 tcg_debug_assert(ret >= 0); 405 return ret; 406} 407 408static bool check_fit_imm(uint32_t imm) 409{ 410 return encode_imm(imm) >= 0; 411} 412 413/* Return true if v16 is a valid 16-bit shifted immediate. */ 414static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) 415{ 416 if (v16 == (v16 & 0xff)) { 417 *cmode = 0x8; 418 *imm8 = v16 & 0xff; 419 return true; 420 } else if (v16 == (v16 & 0xff00)) { 421 *cmode = 0xa; 422 *imm8 = v16 >> 8; 423 return true; 424 } 425 return false; 426} 427 428/* Return true if v32 is a valid 32-bit shifted immediate. */ 429static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) 430{ 431 if (v32 == (v32 & 0xff)) { 432 *cmode = 0x0; 433 *imm8 = v32 & 0xff; 434 return true; 435 } else if (v32 == (v32 & 0xff00)) { 436 *cmode = 0x2; 437 *imm8 = (v32 >> 8) & 0xff; 438 return true; 439 } else if (v32 == (v32 & 0xff0000)) { 440 *cmode = 0x4; 441 *imm8 = (v32 >> 16) & 0xff; 442 return true; 443 } else if (v32 == (v32 & 0xff000000)) { 444 *cmode = 0x6; 445 *imm8 = v32 >> 24; 446 return true; 447 } 448 return false; 449} 450 451/* Return true if v32 is a valid 32-bit shifting ones immediate. */ 452static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) 453{ 454 if ((v32 & 0xffff00ff) == 0xff) { 455 *cmode = 0xc; 456 *imm8 = (v32 >> 8) & 0xff; 457 return true; 458 } else if ((v32 & 0xff00ffff) == 0xffff) { 459 *cmode = 0xd; 460 *imm8 = (v32 >> 16) & 0xff; 461 return true; 462 } 463 return false; 464} 465 466/* 467 * Return non-zero if v32 can be formed by MOVI+ORR. 468 * Place the parameters for MOVI in (cmode, imm8). 469 * Return the cmode for ORR; the imm8 can be had via extraction from v32. 470 */ 471static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) 472{ 473 int i; 474 475 for (i = 6; i > 0; i -= 2) { 476 /* Mask out one byte we can add with ORR. */ 477 uint32_t tmp = v32 & ~(0xffu << (i * 4)); 478 if (is_shimm32(tmp, cmode, imm8) || 479 is_soimm32(tmp, cmode, imm8)) { 480 break; 481 } 482 } 483 return i; 484} 485 486/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ 487static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) 488{ 489 if (v32 == deposit32(v32, 16, 16, v32)) { 490 return is_shimm16(v32, cmode, imm8); 491 } else { 492 return is_shimm32(v32, cmode, imm8); 493 } 494} 495 496/* Test if a constant matches the constraint. 497 * TODO: define constraints for: 498 * 499 * ldr/str offset: between -0xfff and 0xfff 500 * ldrh/strh offset: between -0xff and 0xff 501 * mov operand2: values represented with x << (2 * y), x < 0x100 502 * add, sub, eor...: ditto 503 */ 504static bool tcg_target_const_match(int64_t val, int ct, 505 TCGType type, TCGCond cond, int vece) 506{ 507 if (ct & TCG_CT_CONST) { 508 return 1; 509 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { 510 return 1; 511 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) { 512 return 1; 513 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) { 514 return 1; 515 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 516 return 1; 517 } 518 519 switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { 520 case 0: 521 break; 522 case TCG_CT_CONST_ANDI: 523 val = ~val; 524 /* fallthru */ 525 case TCG_CT_CONST_ORRI: 526 if (val == deposit64(val, 32, 32, val)) { 527 int cmode, imm8; 528 return is_shimm1632(val, &cmode, &imm8); 529 } 530 break; 531 default: 532 /* Both bits should not be set for the same insn. */ 533 g_assert_not_reached(); 534 } 535 536 return 0; 537} 538 539static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset) 540{ 541 tcg_out32(s, (cond << 28) | INSN_B | 542 (((offset - 8) >> 2) & 0x00ffffff)); 543} 544 545static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset) 546{ 547 tcg_out32(s, (cond << 28) | 0x0b000000 | 548 (((offset - 8) >> 2) & 0x00ffffff)); 549} 550 551static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 552{ 553 tcg_out32(s, (cond << 28) | 0x012fff30 | rn); 554} 555 556static void tcg_out_blx_imm(TCGContext *s, int32_t offset) 557{ 558 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | 559 (((offset - 8) >> 2) & 0x00ffffff)); 560} 561 562static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc, 563 TCGReg rd, TCGReg rn, TCGReg rm, int shift) 564{ 565 tcg_out32(s, (cond << 28) | (0 << 25) | opc | 566 (rn << 16) | (rd << 12) | shift | rm); 567} 568 569static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm) 570{ 571 /* Simple reg-reg move, optimising out the 'do nothing' case */ 572 if (rd != rm) { 573 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0)); 574 } 575} 576 577static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn) 578{ 579 tcg_out32(s, (cond << 28) | 0x012fff10 | rn); 580} 581 582static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) 583{ 584 /* 585 * Unless the C portion of QEMU is compiled as thumb, we don't need 586 * true BX semantics; merely a branch to an address held in a register. 587 */ 588 tcg_out_bx_reg(s, cond, rn); 589} 590 591static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, 592 TCGReg rd, TCGReg rn, int im) 593{ 594 tcg_out32(s, (cond << 28) | (1 << 25) | opc | 595 (rn << 16) | (rd << 12) | im); 596} 597 598static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc, 599 TCGReg rn, uint16_t mask) 600{ 601 tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask); 602} 603 604/* Note that this routine is used for both LDR and LDRH formats, so we do 605 not wish to include an immediate shift at this point. */ 606static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 607 TCGReg rn, TCGReg rm, bool u, bool p, bool w) 608{ 609 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) 610 | (w << 21) | (rn << 16) | (rt << 12) | rm); 611} 612 613static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt, 614 TCGReg rn, int imm8, bool p, bool w) 615{ 616 bool u = 1; 617 if (imm8 < 0) { 618 imm8 = -imm8; 619 u = 0; 620 } 621 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 622 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); 623} 624 625static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc, 626 TCGReg rt, TCGReg rn, int imm12, bool p, bool w) 627{ 628 bool u = 1; 629 if (imm12 < 0) { 630 imm12 = -imm12; 631 u = 0; 632 } 633 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | 634 (rn << 16) | (rt << 12) | imm12); 635} 636 637static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt, 638 TCGReg rn, int imm12) 639{ 640 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); 641} 642 643static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt, 644 TCGReg rn, int imm12) 645{ 646 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); 647} 648 649static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt, 650 TCGReg rn, TCGReg rm) 651{ 652 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); 653} 654 655static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt, 656 TCGReg rn, TCGReg rm) 657{ 658 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); 659} 660 661static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt, 662 TCGReg rn, int imm8) 663{ 664 tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); 665} 666 667static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, 668 TCGReg rn, TCGReg rm) 669{ 670 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); 671} 672 673static void __attribute__((unused)) 674tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) 675{ 676 tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); 677} 678 679static void __attribute__((unused)) 680tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8) 681{ 682 tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); 683} 684 685static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt, 686 TCGReg rn, TCGReg rm) 687{ 688 tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); 689} 690 691/* Register pre-increment with base writeback. */ 692static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 693 TCGReg rn, TCGReg rm) 694{ 695 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); 696} 697 698static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt, 699 TCGReg rn, TCGReg rm) 700{ 701 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); 702} 703 704static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt, 705 TCGReg rn, int imm8) 706{ 707 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); 708} 709 710static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt, 711 TCGReg rn, int imm8) 712{ 713 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); 714} 715 716static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt, 717 TCGReg rn, TCGReg rm) 718{ 719 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); 720} 721 722static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt, 723 TCGReg rn, TCGReg rm) 724{ 725 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); 726} 727 728static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt, 729 TCGReg rn, int imm8) 730{ 731 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); 732} 733 734static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt, 735 TCGReg rn, TCGReg rm) 736{ 737 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); 738} 739 740static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt, 741 TCGReg rn, int imm12) 742{ 743 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); 744} 745 746static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt, 747 TCGReg rn, int imm12) 748{ 749 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); 750} 751 752static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt, 753 TCGReg rn, TCGReg rm) 754{ 755 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); 756} 757 758static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt, 759 TCGReg rn, TCGReg rm) 760{ 761 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); 762} 763 764static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt, 765 TCGReg rn, int imm8) 766{ 767 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); 768} 769 770static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt, 771 TCGReg rn, TCGReg rm) 772{ 773 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); 774} 775 776static void tcg_out_movi_pool(TCGContext *s, ARMCond cond, 777 TCGReg rd, uint32_t arg) 778{ 779 new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); 780 tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); 781} 782 783static void tcg_out_movi32(TCGContext *s, ARMCond cond, 784 TCGReg rd, uint32_t arg) 785{ 786 int imm12, diff, opc, sh1, sh2; 787 uint32_t tt0, tt1, tt2; 788 789 /* Check a single MOV/MVN before anything else. */ 790 imm12 = encode_imm(arg); 791 if (imm12 >= 0) { 792 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12); 793 return; 794 } 795 imm12 = encode_imm(~arg); 796 if (imm12 >= 0) { 797 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12); 798 return; 799 } 800 801 /* Check for a pc-relative address. This will usually be the TB, 802 or within the TB, which is immediately before the code block. */ 803 diff = tcg_pcrel_diff(s, (void *)arg) - 8; 804 if (diff >= 0) { 805 imm12 = encode_imm(diff); 806 if (imm12 >= 0) { 807 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12); 808 return; 809 } 810 } else { 811 imm12 = encode_imm(-diff); 812 if (imm12 >= 0) { 813 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12); 814 return; 815 } 816 } 817 818 /* Use movw + movt. */ 819 if (use_armv7_instructions) { 820 /* movw */ 821 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12) 822 | ((arg << 4) & 0x000f0000) | (arg & 0xfff)); 823 if (arg & 0xffff0000) { 824 /* movt */ 825 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12) 826 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff)); 827 } 828 return; 829 } 830 831 /* Look for sequences of two insns. If we have lots of 1's, we can 832 shorten the sequence by beginning with mvn and then clearing 833 higher bits with eor. */ 834 tt0 = arg; 835 opc = ARITH_MOV; 836 if (ctpop32(arg) > 16) { 837 tt0 = ~arg; 838 opc = ARITH_MVN; 839 } 840 sh1 = ctz32(tt0) & ~1; 841 tt1 = tt0 & ~(0xff << sh1); 842 sh2 = ctz32(tt1) & ~1; 843 tt2 = tt1 & ~(0xff << sh2); 844 if (tt2 == 0) { 845 int rot; 846 847 rot = ((32 - sh1) << 7) & 0xf00; 848 tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); 849 rot = ((32 - sh2) << 7) & 0xf00; 850 tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, 851 ((tt0 >> sh2) & 0xff) | rot); 852 return; 853 } 854 855 /* Otherwise, drop it into the constant pool. */ 856 tcg_out_movi_pool(s, cond, rd, arg); 857} 858 859/* 860 * Emit either the reg,imm or reg,reg form of a data-processing insn. 861 * rhs must satisfy the "rI" constraint. 862 */ 863static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, 864 TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const) 865{ 866 if (rhs_is_const) { 867 tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs)); 868 } else { 869 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 870 } 871} 872 873/* 874 * Emit either the reg,imm or reg,reg form of a data-processing insn. 875 * rhs must satisfy the "rIK" constraint. 876 */ 877static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, 878 ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, 879 bool rhs_is_const) 880{ 881 if (rhs_is_const) { 882 int imm12 = encode_imm(rhs); 883 if (imm12 < 0) { 884 imm12 = encode_imm_nofail(~rhs); 885 opc = opinv; 886 } 887 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 888 } else { 889 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 890 } 891} 892 893static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, 894 ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, 895 bool rhs_is_const) 896{ 897 /* Emit either the reg,imm or reg,reg form of a data-processing insn. 898 * rhs must satisfy the "rIN" constraint. 899 */ 900 if (rhs_is_const) { 901 int imm12 = encode_imm(rhs); 902 if (imm12 < 0) { 903 imm12 = encode_imm_nofail(-rhs); 904 opc = opneg; 905 } 906 tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); 907 } else { 908 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); 909 } 910} 911 912static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, 913 TCGReg rn, TCGReg rm) 914{ 915 /* mul */ 916 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); 917} 918 919static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, 920 TCGReg rd1, TCGReg rn, TCGReg rm) 921{ 922 /* umull */ 923 tcg_out32(s, (cond << 28) | 0x00800090 | 924 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 925} 926 927static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, 928 TCGReg rd1, TCGReg rn, TCGReg rm) 929{ 930 /* smull */ 931 tcg_out32(s, (cond << 28) | 0x00c00090 | 932 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); 933} 934 935static void tcg_out_sdiv(TCGContext *s, ARMCond cond, 936 TCGReg rd, TCGReg rn, TCGReg rm) 937{ 938 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 939} 940 941static void tcg_out_udiv(TCGContext *s, ARMCond cond, 942 TCGReg rd, TCGReg rn, TCGReg rm) 943{ 944 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); 945} 946 947static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 948{ 949 /* sxtb */ 950 tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn); 951} 952 953static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) 954{ 955 tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff); 956} 957 958static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) 959{ 960 /* sxth */ 961 tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn); 962} 963 964static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) 965{ 966 /* uxth */ 967 tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn); 968} 969 970static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) 971{ 972 g_assert_not_reached(); 973} 974 975static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) 976{ 977 g_assert_not_reached(); 978} 979 980static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 981{ 982 g_assert_not_reached(); 983} 984 985static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) 986{ 987 g_assert_not_reached(); 988} 989 990static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) 991{ 992 g_assert_not_reached(); 993} 994 995static void tcg_out_bswap16(TCGContext *s, ARMCond cond, 996 TCGReg rd, TCGReg rn, int flags) 997{ 998 if (flags & TCG_BSWAP_OS) { 999 /* revsh */ 1000 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); 1001 return; 1002 } 1003 1004 /* rev16 */ 1005 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); 1006 if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1007 /* uxth */ 1008 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); 1009 } 1010} 1011 1012static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) 1013{ 1014 /* rev */ 1015 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); 1016} 1017 1018static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, 1019 TCGArg a1, int ofs, int len, bool const_a1) 1020{ 1021 if (const_a1) { 1022 /* bfi becomes bfc with rn == 15. */ 1023 a1 = 15; 1024 } 1025 /* bfi/bfc */ 1026 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1 1027 | (ofs << 7) | ((ofs + len - 1) << 16)); 1028} 1029 1030static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd, 1031 TCGReg rn, int ofs, int len) 1032{ 1033 /* ubfx */ 1034 tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn 1035 | (ofs << 7) | ((len - 1) << 16)); 1036} 1037 1038static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd, 1039 TCGReg rn, int ofs, int len) 1040{ 1041 /* sbfx */ 1042 tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn 1043 | (ofs << 7) | ((len - 1) << 16)); 1044} 1045 1046static void tcg_out_ld32u(TCGContext *s, ARMCond cond, 1047 TCGReg rd, TCGReg rn, int32_t offset) 1048{ 1049 if (offset > 0xfff || offset < -0xfff) { 1050 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1051 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP); 1052 } else 1053 tcg_out_ld32_12(s, cond, rd, rn, offset); 1054} 1055 1056static void tcg_out_st32(TCGContext *s, ARMCond cond, 1057 TCGReg rd, TCGReg rn, int32_t offset) 1058{ 1059 if (offset > 0xfff || offset < -0xfff) { 1060 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1061 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP); 1062 } else 1063 tcg_out_st32_12(s, cond, rd, rn, offset); 1064} 1065 1066static void tcg_out_ld16u(TCGContext *s, ARMCond cond, 1067 TCGReg rd, TCGReg rn, int32_t offset) 1068{ 1069 if (offset > 0xff || offset < -0xff) { 1070 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1071 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); 1072 } else 1073 tcg_out_ld16u_8(s, cond, rd, rn, offset); 1074} 1075 1076static void tcg_out_ld16s(TCGContext *s, ARMCond cond, 1077 TCGReg rd, TCGReg rn, int32_t offset) 1078{ 1079 if (offset > 0xff || offset < -0xff) { 1080 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1081 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); 1082 } else 1083 tcg_out_ld16s_8(s, cond, rd, rn, offset); 1084} 1085 1086static void tcg_out_st16(TCGContext *s, ARMCond cond, 1087 TCGReg rd, TCGReg rn, int32_t offset) 1088{ 1089 if (offset > 0xff || offset < -0xff) { 1090 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1091 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); 1092 } else 1093 tcg_out_st16_8(s, cond, rd, rn, offset); 1094} 1095 1096static void tcg_out_ld8u(TCGContext *s, ARMCond cond, 1097 TCGReg rd, TCGReg rn, int32_t offset) 1098{ 1099 if (offset > 0xfff || offset < -0xfff) { 1100 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1101 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); 1102 } else 1103 tcg_out_ld8_12(s, cond, rd, rn, offset); 1104} 1105 1106static void tcg_out_ld8s(TCGContext *s, ARMCond cond, 1107 TCGReg rd, TCGReg rn, int32_t offset) 1108{ 1109 if (offset > 0xff || offset < -0xff) { 1110 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1111 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); 1112 } else 1113 tcg_out_ld8s_8(s, cond, rd, rn, offset); 1114} 1115 1116static void tcg_out_st8(TCGContext *s, ARMCond cond, 1117 TCGReg rd, TCGReg rn, int32_t offset) 1118{ 1119 if (offset > 0xfff || offset < -0xfff) { 1120 tcg_out_movi32(s, cond, TCG_REG_TMP, offset); 1121 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); 1122 } else 1123 tcg_out_st8_12(s, cond, rd, rn, offset); 1124} 1125 1126/* 1127 * The _goto case is normally between TBs within the same code buffer, and 1128 * with the code buffer limited to 16MB we wouldn't need the long case. 1129 * But we also use it for the tail-call to the qemu_ld/st helpers, which does. 1130 */ 1131static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr) 1132{ 1133 intptr_t addri = (intptr_t)addr; 1134 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1135 bool arm_mode = !(addri & 1); 1136 1137 if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { 1138 tcg_out_b_imm(s, cond, disp); 1139 return; 1140 } 1141 1142 /* LDR is interworking from v5t. */ 1143 tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); 1144} 1145 1146/* 1147 * The call case is mostly used for helpers - so it's not unreasonable 1148 * for them to be beyond branch range. 1149 */ 1150static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr) 1151{ 1152 intptr_t addri = (intptr_t)addr; 1153 ptrdiff_t disp = tcg_pcrel_diff(s, addr); 1154 bool arm_mode = !(addri & 1); 1155 1156 if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { 1157 if (arm_mode) { 1158 tcg_out_bl_imm(s, COND_AL, disp); 1159 } else { 1160 tcg_out_blx_imm(s, disp); 1161 } 1162 return; 1163 } 1164 1165 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); 1166 tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); 1167} 1168 1169static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr, 1170 const TCGHelperInfo *info) 1171{ 1172 tcg_out_call_int(s, addr); 1173} 1174 1175static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) 1176{ 1177 if (l->has_value) { 1178 tcg_out_goto(s, cond, l->u.value_ptr); 1179 } else { 1180 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); 1181 tcg_out_b_imm(s, cond, 0); 1182 } 1183} 1184 1185static void tcg_out_mb(TCGContext *s, TCGArg a0) 1186{ 1187 if (use_armv7_instructions) { 1188 tcg_out32(s, INSN_DMB_ISH); 1189 } else { 1190 tcg_out32(s, INSN_DMB_MCR); 1191 } 1192} 1193 1194static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a, 1195 TCGArg b, int b_const) 1196{ 1197 if (!is_tst_cond(cond)) { 1198 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const); 1199 return cond; 1200 } 1201 1202 cond = tcg_tst_eqne_cond(cond); 1203 if (b_const) { 1204 int imm12 = encode_imm(b); 1205 1206 /* 1207 * The compare constraints allow rIN, but TST does not support N. 1208 * Be prepared to load the constant into a scratch register. 1209 */ 1210 if (imm12 >= 0) { 1211 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12); 1212 return cond; 1213 } 1214 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b); 1215 b = TCG_REG_TMP; 1216 } 1217 tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0)); 1218 return cond; 1219} 1220 1221static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, 1222 const int *const_args) 1223{ 1224 TCGReg al = args[0]; 1225 TCGReg ah = args[1]; 1226 TCGArg bl = args[2]; 1227 TCGArg bh = args[3]; 1228 TCGCond cond = args[4]; 1229 int const_bl = const_args[2]; 1230 int const_bh = const_args[3]; 1231 1232 switch (cond) { 1233 case TCG_COND_EQ: 1234 case TCG_COND_NE: 1235 case TCG_COND_LTU: 1236 case TCG_COND_LEU: 1237 case TCG_COND_GTU: 1238 case TCG_COND_GEU: 1239 /* 1240 * We perform a conditional comparison. If the high half is 1241 * equal, then overwrite the flags with the comparison of the 1242 * low half. The resulting flags cover the whole. 1243 */ 1244 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh); 1245 tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); 1246 return cond; 1247 1248 case TCG_COND_TSTEQ: 1249 case TCG_COND_TSTNE: 1250 /* Similar, but with TST instead of CMP. */ 1251 tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh); 1252 tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl); 1253 return tcg_tst_eqne_cond(cond); 1254 1255 case TCG_COND_LT: 1256 case TCG_COND_GE: 1257 /* We perform a double-word subtraction and examine the result. 1258 We do not actually need the result of the subtract, so the 1259 low part "subtract" is a compare. For the high half we have 1260 no choice but to compute into a temporary. */ 1261 tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl); 1262 tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR, 1263 TCG_REG_TMP, ah, bh, const_bh); 1264 return cond; 1265 1266 case TCG_COND_LE: 1267 case TCG_COND_GT: 1268 /* Similar, but with swapped arguments, via reversed subtract. */ 1269 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, 1270 TCG_REG_TMP, al, bl, const_bl); 1271 tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, 1272 TCG_REG_TMP, ah, bh, const_bh); 1273 return tcg_swap_cond(cond); 1274 1275 default: 1276 g_assert_not_reached(); 1277 } 1278} 1279 1280/* 1281 * Note that TCGReg references Q-registers. 1282 * Q-regno = 2 * D-regno, so shift left by 1 while inserting. 1283 */ 1284static uint32_t encode_vd(TCGReg rd) 1285{ 1286 tcg_debug_assert(rd >= TCG_REG_Q0); 1287 return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); 1288} 1289 1290static uint32_t encode_vn(TCGReg rn) 1291{ 1292 tcg_debug_assert(rn >= TCG_REG_Q0); 1293 return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17); 1294} 1295 1296static uint32_t encode_vm(TCGReg rm) 1297{ 1298 tcg_debug_assert(rm >= TCG_REG_Q0); 1299 return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); 1300} 1301 1302static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece, 1303 TCGReg d, TCGReg m) 1304{ 1305 tcg_out32(s, insn | (vece << 18) | (q << 6) | 1306 encode_vd(d) | encode_vm(m)); 1307} 1308 1309static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, 1310 TCGReg d, TCGReg n, TCGReg m) 1311{ 1312 tcg_out32(s, insn | (vece << 20) | (q << 6) | 1313 encode_vd(d) | encode_vn(n) | encode_vm(m)); 1314} 1315 1316static void tcg_out_vmovi(TCGContext *s, TCGReg rd, 1317 int q, int op, int cmode, uint8_t imm8) 1318{ 1319 tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5) 1320 | (cmode << 8) | extract32(imm8, 0, 4) 1321 | (extract32(imm8, 4, 3) << 16) 1322 | (extract32(imm8, 7, 1) << 24)); 1323} 1324 1325static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q, 1326 TCGReg rd, TCGReg rm, int l_imm6) 1327{ 1328 tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) | 1329 (extract32(l_imm6, 6, 1) << 7) | 1330 (extract32(l_imm6, 0, 6) << 16)); 1331} 1332 1333static void tcg_out_vldst(TCGContext *s, ARMInsn insn, 1334 TCGReg rd, TCGReg rn, int offset) 1335{ 1336 if (offset != 0) { 1337 if (check_fit_imm(offset) || check_fit_imm(-offset)) { 1338 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1339 TCG_REG_TMP, rn, offset, true); 1340 } else { 1341 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); 1342 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1343 TCG_REG_TMP, TCG_REG_TMP, rn, 0); 1344 } 1345 rn = TCG_REG_TMP; 1346 } 1347 tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); 1348} 1349 1350typedef struct { 1351 ARMCond cond; 1352 TCGReg base; 1353 int index; 1354 bool index_scratch; 1355 TCGAtomAlign aa; 1356} HostAddress; 1357 1358bool tcg_target_has_memory_bswap(MemOp memop) 1359{ 1360 return false; 1361} 1362 1363static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) 1364{ 1365 /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */ 1366 return TCG_REG_R14; 1367} 1368 1369static const TCGLdstHelperParam ldst_helper_param = { 1370 .ra_gen = ldst_ra_gen, 1371 .ntmp = 1, 1372 .tmp = { TCG_REG_TMP }, 1373}; 1374 1375static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1376{ 1377 MemOp opc = get_memop(lb->oi); 1378 1379 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1380 return false; 1381 } 1382 1383 tcg_out_ld_helper_args(s, lb, &ldst_helper_param); 1384 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); 1385 tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); 1386 1387 tcg_out_goto(s, COND_AL, lb->raddr); 1388 return true; 1389} 1390 1391static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) 1392{ 1393 MemOp opc = get_memop(lb->oi); 1394 1395 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1396 return false; 1397 } 1398 1399 tcg_out_st_helper_args(s, lb, &ldst_helper_param); 1400 1401 /* Tail-call to the helper, which will return to the fast path. */ 1402 tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); 1403 return true; 1404} 1405 1406/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ 1407#define MIN_TLB_MASK_TABLE_OFS -256 1408 1409static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 1410 TCGReg addrlo, TCGReg addrhi, 1411 MemOpIdx oi, bool is_ld) 1412{ 1413 TCGLabelQemuLdst *ldst = NULL; 1414 MemOp opc = get_memop(oi); 1415 unsigned a_mask; 1416 1417 if (tcg_use_softmmu) { 1418 *h = (HostAddress){ 1419 .cond = COND_AL, 1420 .base = addrlo, 1421 .index = TCG_REG_R1, 1422 .index_scratch = true, 1423 }; 1424 } else { 1425 *h = (HostAddress){ 1426 .cond = COND_AL, 1427 .base = addrlo, 1428 .index = guest_base ? TCG_REG_GUEST_BASE : -1, 1429 .index_scratch = false, 1430 }; 1431 } 1432 1433 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 1434 a_mask = (1 << h->aa.align) - 1; 1435 1436 if (tcg_use_softmmu) { 1437 int mem_index = get_mmuidx(oi); 1438 int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) 1439 : offsetof(CPUTLBEntry, addr_write); 1440 int fast_off = tlb_mask_table_ofs(s, mem_index); 1441 unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; 1442 TCGReg t_addr; 1443 1444 ldst = new_ldst_label(s); 1445 ldst->is_ld = is_ld; 1446 ldst->oi = oi; 1447 ldst->addrlo_reg = addrlo; 1448 ldst->addrhi_reg = addrhi; 1449 1450 /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ 1451 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1452 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); 1453 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); 1454 1455 /* Extract the tlb index from the address into R0. */ 1456 tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, 1457 SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); 1458 1459 /* 1460 * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1461 * Load the tlb comparator into R2/R3 and the fast path addend into R1. 1462 */ 1463 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 1464 if (cmp_off == 0) { 1465 if (s->addr_type == TCG_TYPE_I32) { 1466 tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, 1467 TCG_REG_R1, TCG_REG_R0); 1468 } else { 1469 tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, 1470 TCG_REG_R1, TCG_REG_R0); 1471 } 1472 } else { 1473 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1474 TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); 1475 if (s->addr_type == TCG_TYPE_I32) { 1476 tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1477 } else { 1478 tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1479 } 1480 } 1481 1482 /* Load the tlb addend. */ 1483 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, 1484 offsetof(CPUTLBEntry, addend)); 1485 1486 /* 1487 * Check alignment, check comparators. 1488 * Do this in 2-4 insns. Use MOVW for v7, if possible, 1489 * to reduce the number of sequential conditional instructions. 1490 * Almost all guests have at least 4k pages, which means that we need 1491 * to clear at least 9 bits even for an 8-byte memory, which means it 1492 * isn't worth checking for an immediate operand for BIC. 1493 * 1494 * For unaligned accesses, test the page of the last unit of alignment. 1495 * This leaves the least significant alignment bits unchanged, and of 1496 * course must be zero. 1497 */ 1498 t_addr = addrlo; 1499 if (a_mask < s_mask) { 1500 t_addr = TCG_REG_R0; 1501 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, 1502 addrlo, s_mask - a_mask); 1503 } 1504 if (use_armv7_instructions && s->page_bits <= 16) { 1505 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); 1506 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1507 t_addr, TCG_REG_TMP, 0); 1508 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, 1509 TCG_REG_R2, TCG_REG_TMP, 0); 1510 } else { 1511 if (a_mask) { 1512 tcg_debug_assert(a_mask <= 0xff); 1513 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1514 } 1515 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, 1516 SHIFT_IMM_LSR(s->page_bits)); 1517 tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 1518 0, TCG_REG_R2, TCG_REG_TMP, 1519 SHIFT_IMM_LSL(s->page_bits)); 1520 } 1521 1522 if (s->addr_type != TCG_TYPE_I32) { 1523 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); 1524 } 1525 } else if (a_mask) { 1526 ldst = new_ldst_label(s); 1527 ldst->is_ld = is_ld; 1528 ldst->oi = oi; 1529 ldst->addrlo_reg = addrlo; 1530 ldst->addrhi_reg = addrhi; 1531 1532 /* We are expecting alignment to max out at 7 */ 1533 tcg_debug_assert(a_mask <= 0xff); 1534 /* tst addr, #mask */ 1535 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1536 } 1537 1538 return ldst; 1539} 1540 1541static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1542 TCGReg datahi, HostAddress h) 1543{ 1544 TCGReg base; 1545 1546 /* Byte swapping is left to middle-end expansion. */ 1547 tcg_debug_assert((opc & MO_BSWAP) == 0); 1548 1549 switch (opc & MO_SSIZE) { 1550 case MO_UB: 1551 if (h.index < 0) { 1552 tcg_out_ld8_12(s, h.cond, datalo, h.base, 0); 1553 } else { 1554 tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index); 1555 } 1556 break; 1557 case MO_SB: 1558 if (h.index < 0) { 1559 tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0); 1560 } else { 1561 tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index); 1562 } 1563 break; 1564 case MO_UW: 1565 if (h.index < 0) { 1566 tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0); 1567 } else { 1568 tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index); 1569 } 1570 break; 1571 case MO_SW: 1572 if (h.index < 0) { 1573 tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0); 1574 } else { 1575 tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index); 1576 } 1577 break; 1578 case MO_UL: 1579 if (h.index < 0) { 1580 tcg_out_ld32_12(s, h.cond, datalo, h.base, 0); 1581 } else { 1582 tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index); 1583 } 1584 break; 1585 case MO_UQ: 1586 /* We used pair allocation for datalo, so already should be aligned. */ 1587 tcg_debug_assert((datalo & 1) == 0); 1588 tcg_debug_assert(datahi == datalo + 1); 1589 /* LDRD requires alignment; double-check that. */ 1590 if (get_alignment_bits(opc) >= MO_64) { 1591 if (h.index < 0) { 1592 tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); 1593 break; 1594 } 1595 /* 1596 * Rm (the second address op) must not overlap Rt or Rt + 1. 1597 * Since datalo is aligned, we can simplify the test via alignment. 1598 * Flip the two address arguments if that works. 1599 */ 1600 if ((h.index & ~1) != datalo) { 1601 tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index); 1602 break; 1603 } 1604 if ((h.base & ~1) != datalo) { 1605 tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base); 1606 break; 1607 } 1608 } 1609 if (h.index < 0) { 1610 base = h.base; 1611 if (datalo == h.base) { 1612 tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base); 1613 base = TCG_REG_TMP; 1614 } 1615 } else if (h.index_scratch) { 1616 tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base); 1617 tcg_out_ld32_12(s, h.cond, datahi, h.index, 4); 1618 break; 1619 } else { 1620 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1621 h.base, h.index, SHIFT_IMM_LSL(0)); 1622 base = TCG_REG_TMP; 1623 } 1624 tcg_out_ld32_12(s, h.cond, datalo, base, 0); 1625 tcg_out_ld32_12(s, h.cond, datahi, base, 4); 1626 break; 1627 default: 1628 g_assert_not_reached(); 1629 } 1630} 1631 1632static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, 1633 TCGReg addrlo, TCGReg addrhi, 1634 MemOpIdx oi, TCGType data_type) 1635{ 1636 MemOp opc = get_memop(oi); 1637 TCGLabelQemuLdst *ldst; 1638 HostAddress h; 1639 1640 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); 1641 if (ldst) { 1642 ldst->type = data_type; 1643 ldst->datalo_reg = datalo; 1644 ldst->datahi_reg = datahi; 1645 1646 /* 1647 * This a conditional BL only to load a pointer within this 1648 * opcode into LR for the slow path. We will not be using 1649 * the value for a tail call. 1650 */ 1651 ldst->label_ptr[0] = s->code_ptr; 1652 tcg_out_bl_imm(s, COND_NE, 0); 1653 1654 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1655 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1656 } else { 1657 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1658 } 1659} 1660 1661static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1662 TCGReg datahi, HostAddress h) 1663{ 1664 /* Byte swapping is left to middle-end expansion. */ 1665 tcg_debug_assert((opc & MO_BSWAP) == 0); 1666 1667 switch (opc & MO_SIZE) { 1668 case MO_8: 1669 if (h.index < 0) { 1670 tcg_out_st8_12(s, h.cond, datalo, h.base, 0); 1671 } else { 1672 tcg_out_st8_r(s, h.cond, datalo, h.base, h.index); 1673 } 1674 break; 1675 case MO_16: 1676 if (h.index < 0) { 1677 tcg_out_st16_8(s, h.cond, datalo, h.base, 0); 1678 } else { 1679 tcg_out_st16_r(s, h.cond, datalo, h.base, h.index); 1680 } 1681 break; 1682 case MO_32: 1683 if (h.index < 0) { 1684 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1685 } else { 1686 tcg_out_st32_r(s, h.cond, datalo, h.base, h.index); 1687 } 1688 break; 1689 case MO_64: 1690 /* We used pair allocation for datalo, so already should be aligned. */ 1691 tcg_debug_assert((datalo & 1) == 0); 1692 tcg_debug_assert(datahi == datalo + 1); 1693 /* STRD requires alignment; double-check that. */ 1694 if (get_alignment_bits(opc) >= MO_64) { 1695 if (h.index < 0) { 1696 tcg_out_strd_8(s, h.cond, datalo, h.base, 0); 1697 } else { 1698 tcg_out_strd_r(s, h.cond, datalo, h.base, h.index); 1699 } 1700 } else if (h.index < 0) { 1701 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1702 tcg_out_st32_12(s, h.cond, datahi, h.base, 4); 1703 } else if (h.index_scratch) { 1704 tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base); 1705 tcg_out_st32_12(s, h.cond, datahi, h.index, 4); 1706 } else { 1707 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1708 h.base, h.index, SHIFT_IMM_LSL(0)); 1709 tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0); 1710 tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4); 1711 } 1712 break; 1713 default: 1714 g_assert_not_reached(); 1715 } 1716} 1717 1718static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, 1719 TCGReg addrlo, TCGReg addrhi, 1720 MemOpIdx oi, TCGType data_type) 1721{ 1722 MemOp opc = get_memop(oi); 1723 TCGLabelQemuLdst *ldst; 1724 HostAddress h; 1725 1726 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); 1727 if (ldst) { 1728 ldst->type = data_type; 1729 ldst->datalo_reg = datalo; 1730 ldst->datahi_reg = datahi; 1731 1732 h.cond = COND_EQ; 1733 tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); 1734 1735 /* The conditional call is last, as we're going to return here. */ 1736 ldst->label_ptr[0] = s->code_ptr; 1737 tcg_out_bl_imm(s, COND_NE, 0); 1738 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1739 } else { 1740 tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); 1741 } 1742} 1743 1744static void tcg_out_epilogue(TCGContext *s); 1745 1746static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) 1747{ 1748 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg); 1749 tcg_out_epilogue(s); 1750} 1751 1752static void tcg_out_goto_tb(TCGContext *s, int which) 1753{ 1754 uintptr_t i_addr; 1755 intptr_t i_disp; 1756 1757 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1758 set_jmp_insn_offset(s, which); 1759 tcg_out32(s, INSN_NOP); 1760 1761 /* When branch is out of range, fall through to indirect. */ 1762 i_addr = get_jmp_target_addr(s, which); 1763 i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8; 1764 tcg_debug_assert(i_disp < 0); 1765 if (i_disp >= -0xfff) { 1766 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp); 1767 } else { 1768 /* 1769 * The TB is close, but outside the 12 bits addressable by 1770 * the load. We can extend this to 20 bits with a sub of a 1771 * shifted immediate from pc. 1772 */ 1773 int h = -i_disp; 1774 int l = h & 0xfff; 1775 1776 h = encode_imm_nofail(h - l); 1777 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h); 1778 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l); 1779 } 1780 set_jmp_reset_offset(s, which); 1781} 1782 1783void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1784 uintptr_t jmp_rx, uintptr_t jmp_rw) 1785{ 1786 uintptr_t addr = tb->jmp_target_addr[n]; 1787 ptrdiff_t offset = addr - (jmp_rx + 8); 1788 tcg_insn_unit insn; 1789 1790 /* Either directly branch, or fall through to indirect branch. */ 1791 if (offset == sextract64(offset, 0, 26)) { 1792 /* B <addr> */ 1793 insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2); 1794 } else { 1795 insn = INSN_NOP; 1796 } 1797 1798 qatomic_set((uint32_t *)jmp_rw, insn); 1799 flush_idcache_range(jmp_rx, jmp_rw, 4); 1800} 1801 1802static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1803 const TCGArg args[TCG_MAX_OP_ARGS], 1804 const int const_args[TCG_MAX_OP_ARGS]) 1805{ 1806 TCGArg a0, a1, a2, a3, a4, a5; 1807 int c; 1808 1809 switch (opc) { 1810 case INDEX_op_goto_ptr: 1811 tcg_out_b_reg(s, COND_AL, args[0]); 1812 break; 1813 case INDEX_op_br: 1814 tcg_out_goto_label(s, COND_AL, arg_label(args[0])); 1815 break; 1816 1817 case INDEX_op_ld8u_i32: 1818 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); 1819 break; 1820 case INDEX_op_ld8s_i32: 1821 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); 1822 break; 1823 case INDEX_op_ld16u_i32: 1824 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); 1825 break; 1826 case INDEX_op_ld16s_i32: 1827 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); 1828 break; 1829 case INDEX_op_ld_i32: 1830 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); 1831 break; 1832 case INDEX_op_st8_i32: 1833 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); 1834 break; 1835 case INDEX_op_st16_i32: 1836 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); 1837 break; 1838 case INDEX_op_st_i32: 1839 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); 1840 break; 1841 1842 case INDEX_op_movcond_i32: 1843 /* Constraints mean that v2 is always in the same register as dest, 1844 * so we only need to do "if condition passed, move v1 to dest". 1845 */ 1846 c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]); 1847 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV, 1848 ARITH_MVN, args[0], 0, args[3], const_args[3]); 1849 break; 1850 case INDEX_op_add_i32: 1851 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, 1852 args[0], args[1], args[2], const_args[2]); 1853 break; 1854 case INDEX_op_sub_i32: 1855 if (const_args[1]) { 1856 if (const_args[2]) { 1857 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]); 1858 } else { 1859 tcg_out_dat_rI(s, COND_AL, ARITH_RSB, 1860 args[0], args[2], args[1], 1); 1861 } 1862 } else { 1863 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD, 1864 args[0], args[1], args[2], const_args[2]); 1865 } 1866 break; 1867 case INDEX_op_and_i32: 1868 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC, 1869 args[0], args[1], args[2], const_args[2]); 1870 break; 1871 case INDEX_op_andc_i32: 1872 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND, 1873 args[0], args[1], args[2], const_args[2]); 1874 break; 1875 case INDEX_op_or_i32: 1876 c = ARITH_ORR; 1877 goto gen_arith; 1878 case INDEX_op_xor_i32: 1879 c = ARITH_EOR; 1880 /* Fall through. */ 1881 gen_arith: 1882 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]); 1883 break; 1884 case INDEX_op_add2_i32: 1885 a0 = args[0], a1 = args[1], a2 = args[2]; 1886 a3 = args[3], a4 = args[4], a5 = args[5]; 1887 if (a0 == a3 || (a0 == a5 && !const_args[5])) { 1888 a0 = TCG_REG_TMP; 1889 } 1890 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, 1891 a0, a2, a4, const_args[4]); 1892 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, 1893 a1, a3, a5, const_args[5]); 1894 tcg_out_mov_reg(s, COND_AL, args[0], a0); 1895 break; 1896 case INDEX_op_sub2_i32: 1897 a0 = args[0], a1 = args[1], a2 = args[2]; 1898 a3 = args[3], a4 = args[4], a5 = args[5]; 1899 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { 1900 a0 = TCG_REG_TMP; 1901 } 1902 if (const_args[2]) { 1903 if (const_args[4]) { 1904 tcg_out_movi32(s, COND_AL, a0, a4); 1905 a4 = a0; 1906 } 1907 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); 1908 } else { 1909 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, 1910 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); 1911 } 1912 if (const_args[3]) { 1913 if (const_args[5]) { 1914 tcg_out_movi32(s, COND_AL, a1, a5); 1915 a5 = a1; 1916 } 1917 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); 1918 } else { 1919 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, 1920 a1, a3, a5, const_args[5]); 1921 } 1922 tcg_out_mov_reg(s, COND_AL, args[0], a0); 1923 break; 1924 case INDEX_op_neg_i32: 1925 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); 1926 break; 1927 case INDEX_op_not_i32: 1928 tcg_out_dat_reg(s, COND_AL, 1929 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); 1930 break; 1931 case INDEX_op_mul_i32: 1932 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); 1933 break; 1934 case INDEX_op_mulu2_i32: 1935 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); 1936 break; 1937 case INDEX_op_muls2_i32: 1938 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]); 1939 break; 1940 /* XXX: Perhaps args[2] & 0x1f is wrong */ 1941 case INDEX_op_shl_i32: 1942 c = const_args[2] ? 1943 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); 1944 goto gen_shift32; 1945 case INDEX_op_shr_i32: 1946 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : 1947 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); 1948 goto gen_shift32; 1949 case INDEX_op_sar_i32: 1950 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : 1951 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); 1952 goto gen_shift32; 1953 case INDEX_op_rotr_i32: 1954 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) : 1955 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]); 1956 /* Fall through. */ 1957 gen_shift32: 1958 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); 1959 break; 1960 1961 case INDEX_op_rotl_i32: 1962 if (const_args[2]) { 1963 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 1964 ((0x20 - args[2]) & 0x1f) ? 1965 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) : 1966 SHIFT_IMM_LSL(0)); 1967 } else { 1968 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20); 1969 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], 1970 SHIFT_REG_ROR(TCG_REG_TMP)); 1971 } 1972 break; 1973 1974 case INDEX_op_ctz_i32: 1975 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); 1976 a1 = TCG_REG_TMP; 1977 goto do_clz; 1978 1979 case INDEX_op_clz_i32: 1980 a1 = args[1]; 1981 do_clz: 1982 a0 = args[0]; 1983 a2 = args[2]; 1984 c = const_args[2]; 1985 if (c && a2 == 32) { 1986 tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); 1987 break; 1988 } 1989 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); 1990 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); 1991 if (c || a0 != a2) { 1992 tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); 1993 } 1994 break; 1995 1996 case INDEX_op_brcond_i32: 1997 c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]); 1998 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3])); 1999 break; 2000 case INDEX_op_setcond_i32: 2001 c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); 2002 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], 2003 ARITH_MOV, args[0], 0, 1); 2004 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2005 ARITH_MOV, args[0], 0, 0); 2006 break; 2007 case INDEX_op_negsetcond_i32: 2008 c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); 2009 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], 2010 ARITH_MVN, args[0], 0, 0); 2011 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2012 ARITH_MOV, args[0], 0, 0); 2013 break; 2014 2015 case INDEX_op_brcond2_i32: 2016 c = tcg_out_cmp2(s, args, const_args); 2017 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); 2018 break; 2019 case INDEX_op_setcond2_i32: 2020 c = tcg_out_cmp2(s, args + 1, const_args + 1); 2021 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); 2022 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], 2023 ARITH_MOV, args[0], 0, 0); 2024 break; 2025 2026 case INDEX_op_qemu_ld_a32_i32: 2027 tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); 2028 break; 2029 case INDEX_op_qemu_ld_a64_i32: 2030 tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], 2031 args[3], TCG_TYPE_I32); 2032 break; 2033 case INDEX_op_qemu_ld_a32_i64: 2034 tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, 2035 args[3], TCG_TYPE_I64); 2036 break; 2037 case INDEX_op_qemu_ld_a64_i64: 2038 tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], 2039 args[4], TCG_TYPE_I64); 2040 break; 2041 2042 case INDEX_op_qemu_st_a32_i32: 2043 tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); 2044 break; 2045 case INDEX_op_qemu_st_a64_i32: 2046 tcg_out_qemu_st(s, args[0], -1, args[1], args[2], 2047 args[3], TCG_TYPE_I32); 2048 break; 2049 case INDEX_op_qemu_st_a32_i64: 2050 tcg_out_qemu_st(s, args[0], args[1], args[2], -1, 2051 args[3], TCG_TYPE_I64); 2052 break; 2053 case INDEX_op_qemu_st_a64_i64: 2054 tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], 2055 args[4], TCG_TYPE_I64); 2056 break; 2057 2058 case INDEX_op_bswap16_i32: 2059 tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]); 2060 break; 2061 case INDEX_op_bswap32_i32: 2062 tcg_out_bswap32(s, COND_AL, args[0], args[1]); 2063 break; 2064 2065 case INDEX_op_deposit_i32: 2066 tcg_out_deposit(s, COND_AL, args[0], args[2], 2067 args[3], args[4], const_args[2]); 2068 break; 2069 case INDEX_op_extract_i32: 2070 tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); 2071 break; 2072 case INDEX_op_sextract_i32: 2073 tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); 2074 break; 2075 case INDEX_op_extract2_i32: 2076 /* ??? These optimization vs zero should be generic. */ 2077 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ 2078 if (const_args[1]) { 2079 if (const_args[2]) { 2080 tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); 2081 } else { 2082 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2083 args[2], SHIFT_IMM_LSL(32 - args[3])); 2084 } 2085 } else if (const_args[2]) { 2086 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, 2087 args[1], SHIFT_IMM_LSR(args[3])); 2088 } else { 2089 /* We can do extract2 in 2 insns, vs the 3 required otherwise. */ 2090 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, 2091 args[2], SHIFT_IMM_LSL(32 - args[3])); 2092 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, 2093 args[1], SHIFT_IMM_LSR(args[3])); 2094 } 2095 break; 2096 2097 case INDEX_op_div_i32: 2098 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); 2099 break; 2100 case INDEX_op_divu_i32: 2101 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); 2102 break; 2103 2104 case INDEX_op_mb: 2105 tcg_out_mb(s, args[0]); 2106 break; 2107 2108 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 2109 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 2110 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 2111 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 2112 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 2113 case INDEX_op_ext8u_i32: 2114 case INDEX_op_ext16s_i32: 2115 case INDEX_op_ext16u_i32: 2116 default: 2117 g_assert_not_reached(); 2118 } 2119} 2120 2121static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2122{ 2123 switch (op) { 2124 case INDEX_op_goto_ptr: 2125 return C_O0_I1(r); 2126 2127 case INDEX_op_ld8u_i32: 2128 case INDEX_op_ld8s_i32: 2129 case INDEX_op_ld16u_i32: 2130 case INDEX_op_ld16s_i32: 2131 case INDEX_op_ld_i32: 2132 case INDEX_op_neg_i32: 2133 case INDEX_op_not_i32: 2134 case INDEX_op_bswap16_i32: 2135 case INDEX_op_bswap32_i32: 2136 case INDEX_op_ext8s_i32: 2137 case INDEX_op_ext16s_i32: 2138 case INDEX_op_ext16u_i32: 2139 case INDEX_op_extract_i32: 2140 case INDEX_op_sextract_i32: 2141 return C_O1_I1(r, r); 2142 2143 case INDEX_op_st8_i32: 2144 case INDEX_op_st16_i32: 2145 case INDEX_op_st_i32: 2146 return C_O0_I2(r, r); 2147 2148 case INDEX_op_add_i32: 2149 case INDEX_op_sub_i32: 2150 case INDEX_op_setcond_i32: 2151 case INDEX_op_negsetcond_i32: 2152 return C_O1_I2(r, r, rIN); 2153 2154 case INDEX_op_and_i32: 2155 case INDEX_op_andc_i32: 2156 case INDEX_op_clz_i32: 2157 case INDEX_op_ctz_i32: 2158 return C_O1_I2(r, r, rIK); 2159 2160 case INDEX_op_mul_i32: 2161 case INDEX_op_div_i32: 2162 case INDEX_op_divu_i32: 2163 return C_O1_I2(r, r, r); 2164 2165 case INDEX_op_mulu2_i32: 2166 case INDEX_op_muls2_i32: 2167 return C_O2_I2(r, r, r, r); 2168 2169 case INDEX_op_or_i32: 2170 case INDEX_op_xor_i32: 2171 return C_O1_I2(r, r, rI); 2172 2173 case INDEX_op_shl_i32: 2174 case INDEX_op_shr_i32: 2175 case INDEX_op_sar_i32: 2176 case INDEX_op_rotl_i32: 2177 case INDEX_op_rotr_i32: 2178 return C_O1_I2(r, r, ri); 2179 2180 case INDEX_op_brcond_i32: 2181 return C_O0_I2(r, rIN); 2182 case INDEX_op_deposit_i32: 2183 return C_O1_I2(r, 0, rZ); 2184 case INDEX_op_extract2_i32: 2185 return C_O1_I2(r, rZ, rZ); 2186 case INDEX_op_movcond_i32: 2187 return C_O1_I4(r, r, rIN, rIK, 0); 2188 case INDEX_op_add2_i32: 2189 return C_O2_I4(r, r, r, r, rIN, rIK); 2190 case INDEX_op_sub2_i32: 2191 return C_O2_I4(r, r, rI, rI, rIN, rIK); 2192 case INDEX_op_brcond2_i32: 2193 return C_O0_I4(r, r, rI, rI); 2194 case INDEX_op_setcond2_i32: 2195 return C_O1_I4(r, r, r, rI, rI); 2196 2197 case INDEX_op_qemu_ld_a32_i32: 2198 return C_O1_I1(r, q); 2199 case INDEX_op_qemu_ld_a64_i32: 2200 return C_O1_I2(r, q, q); 2201 case INDEX_op_qemu_ld_a32_i64: 2202 return C_O2_I1(e, p, q); 2203 case INDEX_op_qemu_ld_a64_i64: 2204 return C_O2_I2(e, p, q, q); 2205 case INDEX_op_qemu_st_a32_i32: 2206 return C_O0_I2(q, q); 2207 case INDEX_op_qemu_st_a64_i32: 2208 return C_O0_I3(q, q, q); 2209 case INDEX_op_qemu_st_a32_i64: 2210 return C_O0_I3(Q, p, q); 2211 case INDEX_op_qemu_st_a64_i64: 2212 return C_O0_I4(Q, p, q, q); 2213 2214 case INDEX_op_st_vec: 2215 return C_O0_I2(w, r); 2216 case INDEX_op_ld_vec: 2217 case INDEX_op_dupm_vec: 2218 return C_O1_I1(w, r); 2219 case INDEX_op_dup_vec: 2220 return C_O1_I1(w, wr); 2221 case INDEX_op_abs_vec: 2222 case INDEX_op_neg_vec: 2223 case INDEX_op_not_vec: 2224 case INDEX_op_shli_vec: 2225 case INDEX_op_shri_vec: 2226 case INDEX_op_sari_vec: 2227 return C_O1_I1(w, w); 2228 case INDEX_op_dup2_vec: 2229 case INDEX_op_add_vec: 2230 case INDEX_op_mul_vec: 2231 case INDEX_op_smax_vec: 2232 case INDEX_op_smin_vec: 2233 case INDEX_op_ssadd_vec: 2234 case INDEX_op_sssub_vec: 2235 case INDEX_op_sub_vec: 2236 case INDEX_op_umax_vec: 2237 case INDEX_op_umin_vec: 2238 case INDEX_op_usadd_vec: 2239 case INDEX_op_ussub_vec: 2240 case INDEX_op_xor_vec: 2241 case INDEX_op_arm_sshl_vec: 2242 case INDEX_op_arm_ushl_vec: 2243 return C_O1_I2(w, w, w); 2244 case INDEX_op_arm_sli_vec: 2245 return C_O1_I2(w, 0, w); 2246 case INDEX_op_or_vec: 2247 case INDEX_op_andc_vec: 2248 return C_O1_I2(w, w, wO); 2249 case INDEX_op_and_vec: 2250 case INDEX_op_orc_vec: 2251 return C_O1_I2(w, w, wV); 2252 case INDEX_op_cmp_vec: 2253 return C_O1_I2(w, w, wZ); 2254 case INDEX_op_bitsel_vec: 2255 return C_O1_I3(w, w, w, w); 2256 default: 2257 g_assert_not_reached(); 2258 } 2259} 2260 2261static void tcg_target_init(TCGContext *s) 2262{ 2263 /* 2264 * Only probe for the platform and capabilities if we haven't already 2265 * determined maximum values at compile time. 2266 */ 2267#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2268 { 2269 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2270#ifndef use_idiv_instructions 2271 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2272#endif 2273#ifndef use_neon_instructions 2274 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2275#endif 2276 } 2277#endif 2278 2279 if (__ARM_ARCH < 7) { 2280 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2281 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2282 arm_arch = pl[1] - '0'; 2283 } 2284 2285 if (arm_arch < 6) { 2286 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2287 exit(EXIT_FAILURE); 2288 } 2289 } 2290 2291 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2292 2293 tcg_target_call_clobber_regs = 0; 2294 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2295 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2296 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2297 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2298 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2299 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2300 2301 if (use_neon_instructions) { 2302 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2303 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2304 2305 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2306 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2307 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2308 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2309 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2310 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2311 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2312 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2313 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2314 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2315 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2316 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2317 } 2318 2319 s->reserved_regs = 0; 2320 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2321 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2322 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2323 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2324} 2325 2326static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2327 TCGReg arg1, intptr_t arg2) 2328{ 2329 switch (type) { 2330 case TCG_TYPE_I32: 2331 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2332 return; 2333 case TCG_TYPE_V64: 2334 /* regs 1; size 8; align 8 */ 2335 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2336 return; 2337 case TCG_TYPE_V128: 2338 /* 2339 * We have only 8-byte alignment for the stack per the ABI. 2340 * Rather than dynamically re-align the stack, it's easier 2341 * to simply not request alignment beyond that. So: 2342 * regs 2; size 8; align 8 2343 */ 2344 tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2); 2345 return; 2346 default: 2347 g_assert_not_reached(); 2348 } 2349} 2350 2351static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 2352 TCGReg arg1, intptr_t arg2) 2353{ 2354 switch (type) { 2355 case TCG_TYPE_I32: 2356 tcg_out_st32(s, COND_AL, arg, arg1, arg2); 2357 return; 2358 case TCG_TYPE_V64: 2359 /* regs 1; size 8; align 8 */ 2360 tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); 2361 return; 2362 case TCG_TYPE_V128: 2363 /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */ 2364 tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2); 2365 return; 2366 default: 2367 g_assert_not_reached(); 2368 } 2369} 2370 2371static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 2372 TCGReg base, intptr_t ofs) 2373{ 2374 return false; 2375} 2376 2377static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 2378{ 2379 if (ret == arg) { 2380 return true; 2381 } 2382 switch (type) { 2383 case TCG_TYPE_I32: 2384 if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) { 2385 tcg_out_mov_reg(s, COND_AL, ret, arg); 2386 return true; 2387 } 2388 return false; 2389 2390 case TCG_TYPE_V64: 2391 case TCG_TYPE_V128: 2392 /* "VMOV D,N" is an alias for "VORR D,N,N". */ 2393 tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg); 2394 return true; 2395 2396 default: 2397 g_assert_not_reached(); 2398 } 2399} 2400 2401static void tcg_out_movi(TCGContext *s, TCGType type, 2402 TCGReg ret, tcg_target_long arg) 2403{ 2404 tcg_debug_assert(type == TCG_TYPE_I32); 2405 tcg_debug_assert(ret < TCG_REG_Q0); 2406 tcg_out_movi32(s, COND_AL, ret, arg); 2407} 2408 2409static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 2410{ 2411 return false; 2412} 2413 2414static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 2415 tcg_target_long imm) 2416{ 2417 int enc, opc = ARITH_ADD; 2418 2419 /* All of the easiest immediates to encode are positive. */ 2420 if (imm < 0) { 2421 imm = -imm; 2422 opc = ARITH_SUB; 2423 } 2424 enc = encode_imm(imm); 2425 if (enc >= 0) { 2426 tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc); 2427 } else { 2428 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm); 2429 tcg_out_dat_reg(s, COND_AL, opc, rd, rs, 2430 TCG_REG_TMP, SHIFT_IMM_LSL(0)); 2431 } 2432} 2433 2434/* Type is always V128, with I64 elements. */ 2435static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh) 2436{ 2437 /* Move high element into place first. */ 2438 /* VMOV Dd+1, Ds */ 2439 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh); 2440 /* Move low element into place; tcg_out_mov will check for nop. */ 2441 tcg_out_mov(s, TCG_TYPE_V64, rd, rl); 2442} 2443 2444static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 2445 TCGReg rd, TCGReg rs) 2446{ 2447 int q = type - TCG_TYPE_V64; 2448 2449 if (vece == MO_64) { 2450 if (type == TCG_TYPE_V128) { 2451 tcg_out_dup2_vec(s, rd, rs, rs); 2452 } else { 2453 tcg_out_mov(s, TCG_TYPE_V64, rd, rs); 2454 } 2455 } else if (rs < TCG_REG_Q0) { 2456 int b = (vece == MO_8); 2457 int e = (vece == MO_16); 2458 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) | 2459 encode_vn(rd) | (rs << 12)); 2460 } else { 2461 int imm4 = 1 << vece; 2462 tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) | 2463 encode_vd(rd) | encode_vm(rs)); 2464 } 2465 return true; 2466} 2467 2468static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 2469 TCGReg rd, TCGReg base, intptr_t offset) 2470{ 2471 if (vece == MO_64) { 2472 tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset); 2473 if (type == TCG_TYPE_V128) { 2474 tcg_out_dup2_vec(s, rd, rd, rd); 2475 } 2476 } else { 2477 int q = type - TCG_TYPE_V64; 2478 tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5), 2479 rd, base, offset); 2480 } 2481 return true; 2482} 2483 2484static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 2485 TCGReg rd, int64_t v64) 2486{ 2487 int q = type - TCG_TYPE_V64; 2488 int cmode, imm8, i; 2489 2490 /* Test all bytes equal first. */ 2491 if (vece == MO_8) { 2492 tcg_out_vmovi(s, rd, q, 0, 0xe, v64); 2493 return; 2494 } 2495 2496 /* 2497 * Test all bytes 0x00 or 0xff second. This can match cases that 2498 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. 2499 */ 2500 for (i = imm8 = 0; i < 8; i++) { 2501 uint8_t byte = v64 >> (i * 8); 2502 if (byte == 0xff) { 2503 imm8 |= 1 << i; 2504 } else if (byte != 0) { 2505 goto fail_bytes; 2506 } 2507 } 2508 tcg_out_vmovi(s, rd, q, 1, 0xe, imm8); 2509 return; 2510 fail_bytes: 2511 2512 /* 2513 * Tests for various replications. For each element width, if we 2514 * cannot find an expansion there's no point checking a larger 2515 * width because we already know by replication it cannot match. 2516 */ 2517 if (vece == MO_16) { 2518 uint16_t v16 = v64; 2519 2520 if (is_shimm16(v16, &cmode, &imm8)) { 2521 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2522 return; 2523 } 2524 if (is_shimm16(~v16, &cmode, &imm8)) { 2525 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2526 return; 2527 } 2528 2529 /* 2530 * Otherwise, all remaining constants can be loaded in two insns: 2531 * rd = v16 & 0xff, rd |= v16 & 0xff00. 2532 */ 2533 tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff); 2534 tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */ 2535 return; 2536 } 2537 2538 if (vece == MO_32) { 2539 uint32_t v32 = v64; 2540 2541 if (is_shimm32(v32, &cmode, &imm8) || 2542 is_soimm32(v32, &cmode, &imm8)) { 2543 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2544 return; 2545 } 2546 if (is_shimm32(~v32, &cmode, &imm8) || 2547 is_soimm32(~v32, &cmode, &imm8)) { 2548 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2549 return; 2550 } 2551 2552 /* 2553 * Restrict the set of constants to those we can load with 2554 * two instructions. Others we load from the pool. 2555 */ 2556 i = is_shimm32_pair(v32, &cmode, &imm8); 2557 if (i) { 2558 tcg_out_vmovi(s, rd, q, 0, cmode, imm8); 2559 tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8)); 2560 return; 2561 } 2562 i = is_shimm32_pair(~v32, &cmode, &imm8); 2563 if (i) { 2564 tcg_out_vmovi(s, rd, q, 1, cmode, imm8); 2565 tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8)); 2566 return; 2567 } 2568 } 2569 2570 /* 2571 * As a last resort, load from the constant pool. 2572 */ 2573 if (!q || vece == MO_64) { 2574 new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32); 2575 /* VLDR Dd, [pc + offset] */ 2576 tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16)); 2577 if (q) { 2578 tcg_out_dup2_vec(s, rd, rd, rd); 2579 } 2580 } else { 2581 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0); 2582 /* add tmp, pc, offset */ 2583 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0); 2584 tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0); 2585 } 2586} 2587 2588static const ARMInsn vec_cmp_insn[16] = { 2589 [TCG_COND_EQ] = INSN_VCEQ, 2590 [TCG_COND_GT] = INSN_VCGT, 2591 [TCG_COND_GE] = INSN_VCGE, 2592 [TCG_COND_GTU] = INSN_VCGT_U, 2593 [TCG_COND_GEU] = INSN_VCGE_U, 2594}; 2595 2596static const ARMInsn vec_cmp0_insn[16] = { 2597 [TCG_COND_EQ] = INSN_VCEQ0, 2598 [TCG_COND_GT] = INSN_VCGT0, 2599 [TCG_COND_GE] = INSN_VCGE0, 2600 [TCG_COND_LT] = INSN_VCLT0, 2601 [TCG_COND_LE] = INSN_VCLE0, 2602}; 2603 2604static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 2605 unsigned vecl, unsigned vece, 2606 const TCGArg args[TCG_MAX_OP_ARGS], 2607 const int const_args[TCG_MAX_OP_ARGS]) 2608{ 2609 TCGType type = vecl + TCG_TYPE_V64; 2610 unsigned q = vecl; 2611 TCGArg a0, a1, a2, a3; 2612 int cmode, imm8; 2613 2614 a0 = args[0]; 2615 a1 = args[1]; 2616 a2 = args[2]; 2617 2618 switch (opc) { 2619 case INDEX_op_ld_vec: 2620 tcg_out_ld(s, type, a0, a1, a2); 2621 return; 2622 case INDEX_op_st_vec: 2623 tcg_out_st(s, type, a0, a1, a2); 2624 return; 2625 case INDEX_op_dupm_vec: 2626 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2627 return; 2628 case INDEX_op_dup2_vec: 2629 tcg_out_dup2_vec(s, a0, a1, a2); 2630 return; 2631 case INDEX_op_abs_vec: 2632 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1); 2633 return; 2634 case INDEX_op_neg_vec: 2635 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1); 2636 return; 2637 case INDEX_op_not_vec: 2638 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1); 2639 return; 2640 case INDEX_op_add_vec: 2641 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); 2642 return; 2643 case INDEX_op_mul_vec: 2644 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); 2645 return; 2646 case INDEX_op_smax_vec: 2647 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2); 2648 return; 2649 case INDEX_op_smin_vec: 2650 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2); 2651 return; 2652 case INDEX_op_sub_vec: 2653 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); 2654 return; 2655 case INDEX_op_ssadd_vec: 2656 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2); 2657 return; 2658 case INDEX_op_sssub_vec: 2659 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); 2660 return; 2661 case INDEX_op_umax_vec: 2662 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2); 2663 return; 2664 case INDEX_op_umin_vec: 2665 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2); 2666 return; 2667 case INDEX_op_usadd_vec: 2668 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); 2669 return; 2670 case INDEX_op_ussub_vec: 2671 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2); 2672 return; 2673 case INDEX_op_xor_vec: 2674 tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); 2675 return; 2676 case INDEX_op_arm_sshl_vec: 2677 /* 2678 * Note that Vm is the data and Vn is the shift count, 2679 * therefore the arguments appear reversed. 2680 */ 2681 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1); 2682 return; 2683 case INDEX_op_arm_ushl_vec: 2684 /* See above. */ 2685 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1); 2686 return; 2687 case INDEX_op_shli_vec: 2688 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); 2689 return; 2690 case INDEX_op_shri_vec: 2691 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2); 2692 return; 2693 case INDEX_op_sari_vec: 2694 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); 2695 return; 2696 case INDEX_op_arm_sli_vec: 2697 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece)); 2698 return; 2699 2700 case INDEX_op_andc_vec: 2701 if (!const_args[2]) { 2702 tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2); 2703 return; 2704 } 2705 a2 = ~a2; 2706 /* fall through */ 2707 case INDEX_op_and_vec: 2708 if (const_args[2]) { 2709 is_shimm1632(~a2, &cmode, &imm8); 2710 if (a0 == a1) { 2711 tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */ 2712 return; 2713 } 2714 tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */ 2715 a2 = a0; 2716 } 2717 tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); 2718 return; 2719 2720 case INDEX_op_orc_vec: 2721 if (!const_args[2]) { 2722 tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2); 2723 return; 2724 } 2725 a2 = ~a2; 2726 /* fall through */ 2727 case INDEX_op_or_vec: 2728 if (const_args[2]) { 2729 is_shimm1632(a2, &cmode, &imm8); 2730 if (a0 == a1) { 2731 tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */ 2732 return; 2733 } 2734 tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */ 2735 a2 = a0; 2736 } 2737 tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2); 2738 return; 2739 2740 case INDEX_op_cmp_vec: 2741 { 2742 TCGCond cond = args[3]; 2743 2744 if (cond == TCG_COND_NE) { 2745 if (const_args[2]) { 2746 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1); 2747 } else { 2748 tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2); 2749 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); 2750 } 2751 } else { 2752 ARMInsn insn; 2753 2754 if (const_args[2]) { 2755 insn = vec_cmp0_insn[cond]; 2756 if (insn) { 2757 tcg_out_vreg2(s, insn, q, vece, a0, a1); 2758 return; 2759 } 2760 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); 2761 a2 = TCG_VEC_TMP; 2762 } 2763 insn = vec_cmp_insn[cond]; 2764 if (insn == 0) { 2765 TCGArg t; 2766 t = a1, a1 = a2, a2 = t; 2767 cond = tcg_swap_cond(cond); 2768 insn = vec_cmp_insn[cond]; 2769 tcg_debug_assert(insn != 0); 2770 } 2771 tcg_out_vreg3(s, insn, q, vece, a0, a1, a2); 2772 } 2773 } 2774 return; 2775 2776 case INDEX_op_bitsel_vec: 2777 a3 = args[3]; 2778 if (a0 == a3) { 2779 tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1); 2780 } else if (a0 == a2) { 2781 tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1); 2782 } else { 2783 tcg_out_mov(s, type, a0, a1); 2784 tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3); 2785 } 2786 return; 2787 2788 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ 2789 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ 2790 default: 2791 g_assert_not_reached(); 2792 } 2793} 2794 2795int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 2796{ 2797 switch (opc) { 2798 case INDEX_op_add_vec: 2799 case INDEX_op_sub_vec: 2800 case INDEX_op_and_vec: 2801 case INDEX_op_andc_vec: 2802 case INDEX_op_or_vec: 2803 case INDEX_op_orc_vec: 2804 case INDEX_op_xor_vec: 2805 case INDEX_op_not_vec: 2806 case INDEX_op_shli_vec: 2807 case INDEX_op_shri_vec: 2808 case INDEX_op_sari_vec: 2809 case INDEX_op_ssadd_vec: 2810 case INDEX_op_sssub_vec: 2811 case INDEX_op_usadd_vec: 2812 case INDEX_op_ussub_vec: 2813 case INDEX_op_bitsel_vec: 2814 return 1; 2815 case INDEX_op_abs_vec: 2816 case INDEX_op_cmp_vec: 2817 case INDEX_op_mul_vec: 2818 case INDEX_op_neg_vec: 2819 case INDEX_op_smax_vec: 2820 case INDEX_op_smin_vec: 2821 case INDEX_op_umax_vec: 2822 case INDEX_op_umin_vec: 2823 return vece < MO_64; 2824 case INDEX_op_shlv_vec: 2825 case INDEX_op_shrv_vec: 2826 case INDEX_op_sarv_vec: 2827 case INDEX_op_rotli_vec: 2828 case INDEX_op_rotlv_vec: 2829 case INDEX_op_rotrv_vec: 2830 return -1; 2831 default: 2832 return 0; 2833 } 2834} 2835 2836void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2837 TCGArg a0, ...) 2838{ 2839 va_list va; 2840 TCGv_vec v0, v1, v2, t1, t2, c1; 2841 TCGArg a2; 2842 2843 va_start(va, a0); 2844 v0 = temp_tcgv_vec(arg_temp(a0)); 2845 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 2846 a2 = va_arg(va, TCGArg); 2847 va_end(va); 2848 2849 switch (opc) { 2850 case INDEX_op_shlv_vec: 2851 /* 2852 * Merely propagate shlv_vec to arm_ushl_vec. 2853 * In this way we don't set TCG_TARGET_HAS_shv_vec 2854 * because everything is done via expansion. 2855 */ 2856 v2 = temp_tcgv_vec(arg_temp(a2)); 2857 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 2858 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 2859 break; 2860 2861 case INDEX_op_shrv_vec: 2862 case INDEX_op_sarv_vec: 2863 /* Right shifts are negative left shifts for NEON. */ 2864 v2 = temp_tcgv_vec(arg_temp(a2)); 2865 t1 = tcg_temp_new_vec(type); 2866 tcg_gen_neg_vec(vece, t1, v2); 2867 if (opc == INDEX_op_shrv_vec) { 2868 opc = INDEX_op_arm_ushl_vec; 2869 } else { 2870 opc = INDEX_op_arm_sshl_vec; 2871 } 2872 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0), 2873 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2874 tcg_temp_free_vec(t1); 2875 break; 2876 2877 case INDEX_op_rotli_vec: 2878 t1 = tcg_temp_new_vec(type); 2879 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); 2880 vec_gen_4(INDEX_op_arm_sli_vec, type, vece, 2881 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); 2882 tcg_temp_free_vec(t1); 2883 break; 2884 2885 case INDEX_op_rotlv_vec: 2886 v2 = temp_tcgv_vec(arg_temp(a2)); 2887 t1 = tcg_temp_new_vec(type); 2888 c1 = tcg_constant_vec(type, vece, 8 << vece); 2889 tcg_gen_sub_vec(vece, t1, v2, c1); 2890 /* Right shifts are negative left shifts for NEON. */ 2891 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 2892 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2893 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), 2894 tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 2895 tcg_gen_or_vec(vece, v0, v0, t1); 2896 tcg_temp_free_vec(t1); 2897 break; 2898 2899 case INDEX_op_rotrv_vec: 2900 v2 = temp_tcgv_vec(arg_temp(a2)); 2901 t1 = tcg_temp_new_vec(type); 2902 t2 = tcg_temp_new_vec(type); 2903 c1 = tcg_constant_vec(type, vece, 8 << vece); 2904 tcg_gen_neg_vec(vece, t1, v2); 2905 tcg_gen_sub_vec(vece, t2, c1, v2); 2906 /* Right shifts are negative left shifts for NEON. */ 2907 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), 2908 tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2909 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2), 2910 tcgv_vec_arg(v1), tcgv_vec_arg(t2)); 2911 tcg_gen_or_vec(vece, v0, t1, t2); 2912 tcg_temp_free_vec(t1); 2913 tcg_temp_free_vec(t2); 2914 break; 2915 2916 default: 2917 g_assert_not_reached(); 2918 } 2919} 2920 2921static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 2922{ 2923 int i; 2924 for (i = 0; i < count; ++i) { 2925 p[i] = INSN_NOP; 2926 } 2927} 2928 2929/* Compute frame size via macros, to share between tcg_target_qemu_prologue 2930 and tcg_register_jit. */ 2931 2932#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long)) 2933 2934#define FRAME_SIZE \ 2935 ((PUSH_SIZE \ 2936 + TCG_STATIC_CALL_ARGS_SIZE \ 2937 + CPU_TEMP_BUF_NLONGS * sizeof(long) \ 2938 + TCG_TARGET_STACK_ALIGN - 1) \ 2939 & -TCG_TARGET_STACK_ALIGN) 2940 2941#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) 2942 2943static void tcg_target_qemu_prologue(TCGContext *s) 2944{ 2945 /* Calling convention requires us to save r4-r11 and lr. */ 2946 /* stmdb sp!, { r4 - r11, lr } */ 2947 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK, 2948 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 2949 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 2950 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14)); 2951 2952 /* Reserve callee argument and tcg temp space. */ 2953 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, 2954 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 2955 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 2956 CPU_TEMP_BUF_NLONGS * sizeof(long)); 2957 2958 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2959 2960 if (!tcg_use_softmmu && guest_base) { 2961 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); 2962 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); 2963 } 2964 2965 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); 2966 2967 /* 2968 * Return path for goto_ptr. Set return value to 0, a-la exit_tb, 2969 * and fall through to the rest of the epilogue. 2970 */ 2971 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 2972 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); 2973 tcg_out_epilogue(s); 2974} 2975 2976static void tcg_out_epilogue(TCGContext *s) 2977{ 2978 /* Release local stack frame. */ 2979 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, 2980 TCG_REG_CALL_STACK, STACK_ADDEND, 1); 2981 2982 /* ldmia sp!, { r4 - r11, pc } */ 2983 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK, 2984 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | 2985 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | 2986 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC)); 2987} 2988 2989static void tcg_out_tb_start(TCGContext *s) 2990{ 2991 /* nothing to do */ 2992} 2993 2994typedef struct { 2995 DebugFrameHeader h; 2996 uint8_t fde_def_cfa[4]; 2997 uint8_t fde_reg_ofs[18]; 2998} DebugFrame; 2999 3000#define ELF_HOST_MACHINE EM_ARM 3001 3002/* We're expecting a 2 byte uleb128 encoded value. */ 3003QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); 3004 3005static const DebugFrame debug_frame = { 3006 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 3007 .h.cie.id = -1, 3008 .h.cie.version = 1, 3009 .h.cie.code_align = 1, 3010 .h.cie.data_align = 0x7c, /* sleb128 -4 */ 3011 .h.cie.return_column = 14, 3012 3013 /* Total FDE size does not include the "len" member. */ 3014 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 3015 3016 .fde_def_cfa = { 3017 12, 13, /* DW_CFA_def_cfa sp, ... */ 3018 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 3019 (FRAME_SIZE >> 7) 3020 }, 3021 .fde_reg_ofs = { 3022 /* The following must match the stmdb in the prologue. */ 3023 0x8e, 1, /* DW_CFA_offset, lr, -4 */ 3024 0x8b, 2, /* DW_CFA_offset, r11, -8 */ 3025 0x8a, 3, /* DW_CFA_offset, r10, -12 */ 3026 0x89, 4, /* DW_CFA_offset, r9, -16 */ 3027 0x88, 5, /* DW_CFA_offset, r8, -20 */ 3028 0x87, 6, /* DW_CFA_offset, r7, -24 */ 3029 0x86, 7, /* DW_CFA_offset, r6, -28 */ 3030 0x85, 8, /* DW_CFA_offset, r5, -32 */ 3031 0x84, 9, /* DW_CFA_offset, r4, -36 */ 3032 } 3033}; 3034 3035void tcg_register_jit(const void *buf, size_t buf_size) 3036{ 3037 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 3038} 3039