/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k. */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (softmmu only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#else
#define ALL_QLDST_REGS   (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
#endif

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift. */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
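/*
 * Worked example (added commentary, not from the original source):
 * 0xff000000 fails the no-rotation test, but ctz32(imm) & ~1 is 24,
 * so imm8 = 0xff and rot = 32 - 24 = 8; encode_imm() returns
 * (8 << 7) | 0xff = 0x4ff, i.e. "0xff rotated right by 8".  A constant
 * such as 0x00ff00ff has no valid rotation and yields -1, so callers
 * must fall back to another expansion.
 */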
static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate. */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate. */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate. */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR. */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}

/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn. */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
              (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
              (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void __attribute__((unused))
tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback. */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else. */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block. */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt. */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor. */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool. */
    tcg_out_movi_pool(s, cond, rd, arg);
}
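/*
 * Illustrative example (added commentary): for arg = 0x00ff00ff neither
 * MOV nor MVN can encode the constant, but sh1 = 0 and sh2 = 16 leave
 * tt2 == 0, so the code above emits "mov rd, #0xff" followed by
 * "eor rd, rd, #0x00ff0000" instead of spilling to the constant pool.
 */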
/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
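/*
 * Usage note (added commentary): the rIK/rIN helpers let the register
 * allocator accept constants whose inversion or negation is encodable.
 * For instance "add rd, rn, #-1" has no ARM encoding, but
 * tcg_out_dat_rIN(s, cond, ARITH_ADD, ARITH_SUB, rd, rn, -1, true)
 * emits the equivalent "sub rd, rn, #1".
 */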
static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15. */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /*
         * We perform a conditional comparison.  If the high half is
         * equal, then overwrite the flags with the comparison of the
         * low half.  The resulting flags cover the whole.
         */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract. */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
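/*
 * Added example: for a 64-bit unsigned comparison such as LTU, the pair
 *     cmp   ah, bh
 *     cmpeq al, bl
 * leaves the flags from the low-half comparison in place only when the
 * high halves were equal, so a single BLO (branch if unsigned lower)
 * afterwards tests the whole 64-bit relation.
 */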
/*
 * Note that TCGReg references Q-registers.
 * Q-regno = 2 * D-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
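/*
 * Worked example (added, assuming the usual numbering with the vector
 * registers following the sixteen core registers): TCG_REG_Q1 aliases
 * D2/D3, so encode_vd(TCG_REG_Q1) places 2 in the Vd field (bits 15:12)
 * with the D bit (22) clear, while TCG_REG_Q8 aliases D16/D17 and sets
 * only the D bit.
 */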
static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

typedef struct {
    ARMCond cond;
    TCGReg base;
    int index;
    bool index_scratch;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
    return TCG_REG_R14;
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 1,
    .tmp = { TCG_REG_TMP },
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);

    /* Tail-call to the helper, which will return to the fast path. */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}

/* We expect to use a 9-bit sign-magnitude negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addrlo, TCGReg addrhi,
                                           MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_mask;

#ifdef CONFIG_SOFTMMU
    *h = (HostAddress){
        .cond = COND_AL,
        .base = addrlo,
        .index = TCG_REG_R1,
        .index_scratch = true,
    };
#else
    *h = (HostAddress){
        .cond = COND_AL,
        .base = addrlo,
        .index = guest_base ? TCG_REG_GUEST_BASE : -1,
        .index_scratch = false,
    };
#endif

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    TCGReg t_addr;

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addrlo;
    ldst->addrhi_reg = addrhi;

    /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0. */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
    if (cmp_off == 0) {
        if (s->addr_type == TCG_TYPE_I32) {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (s->addr_type == TCG_TYPE_I32) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }

    /* Load the tlb addend. */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in 2-4 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory, which means it
     * isn't worth checking for an immediate operand for BIC.
     *
     * For unaligned accesses, test the page of the last unit of alignment.
     * This leaves the least significant alignment bits unchanged, and of
     * course must be zero.
     */
1468 */ 1469 t_addr = addrlo; 1470 if (a_mask < s_mask) { 1471 t_addr = TCG_REG_R0; 1472 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, 1473 addrlo, s_mask - a_mask); 1474 } 1475 if (use_armv7_instructions && s->page_bits <= 16) { 1476 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); 1477 tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1478 t_addr, TCG_REG_TMP, 0); 1479 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); 1480 } else { 1481 if (a_mask) { 1482 tcg_debug_assert(a_mask <= 0xff); 1483 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1484 } 1485 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, 1486 SHIFT_IMM_LSR(s->page_bits)); 1487 tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 1488 0, TCG_REG_R2, TCG_REG_TMP, 1489 SHIFT_IMM_LSL(s->page_bits)); 1490 } 1491 1492 if (s->addr_type != TCG_TYPE_I32) { 1493 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); 1494 } 1495#else 1496 if (a_mask) { 1497 ldst = new_ldst_label(s); 1498 ldst->is_ld = is_ld; 1499 ldst->oi = oi; 1500 ldst->addrlo_reg = addrlo; 1501 ldst->addrhi_reg = addrhi; 1502 1503 /* We are expecting alignment to max out at 7 */ 1504 tcg_debug_assert(a_mask <= 0xff); 1505 /* tst addr, #mask */ 1506 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); 1507 } 1508#endif 1509 1510 return ldst; 1511} 1512 1513static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1514 TCGReg datahi, HostAddress h) 1515{ 1516 TCGReg base; 1517 1518 /* Byte swapping is left to middle-end expansion. */ 1519 tcg_debug_assert((opc & MO_BSWAP) == 0); 1520 1521 switch (opc & MO_SSIZE) { 1522 case MO_UB: 1523 if (h.index < 0) { 1524 tcg_out_ld8_12(s, h.cond, datalo, h.base, 0); 1525 } else { 1526 tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index); 1527 } 1528 break; 1529 case MO_SB: 1530 if (h.index < 0) { 1531 tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0); 1532 } else { 1533 tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index); 1534 } 1535 break; 1536 case MO_UW: 1537 if (h.index < 0) { 1538 tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0); 1539 } else { 1540 tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index); 1541 } 1542 break; 1543 case MO_SW: 1544 if (h.index < 0) { 1545 tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0); 1546 } else { 1547 tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index); 1548 } 1549 break; 1550 case MO_UL: 1551 if (h.index < 0) { 1552 tcg_out_ld32_12(s, h.cond, datalo, h.base, 0); 1553 } else { 1554 tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index); 1555 } 1556 break; 1557 case MO_UQ: 1558 /* We used pair allocation for datalo, so already should be aligned. */ 1559 tcg_debug_assert((datalo & 1) == 0); 1560 tcg_debug_assert(datahi == datalo + 1); 1561 /* LDRD requires alignment; double-check that. */ 1562 if (get_alignment_bits(opc) >= MO_64) { 1563 if (h.index < 0) { 1564 tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); 1565 break; 1566 } 1567 /* 1568 * Rm (the second address op) must not overlap Rt or Rt + 1. 1569 * Since datalo is aligned, we can simplify the test via alignment. 1570 * Flip the two address arguments if that works. 
1571 */ 1572 if ((h.index & ~1) != datalo) { 1573 tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index); 1574 break; 1575 } 1576 if ((h.base & ~1) != datalo) { 1577 tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base); 1578 break; 1579 } 1580 } 1581 if (h.index < 0) { 1582 base = h.base; 1583 if (datalo == h.base) { 1584 tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base); 1585 base = TCG_REG_TMP; 1586 } 1587 } else if (h.index_scratch) { 1588 tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base); 1589 tcg_out_ld32_12(s, h.cond, datahi, h.index, 4); 1590 break; 1591 } else { 1592 tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, 1593 h.base, h.index, SHIFT_IMM_LSL(0)); 1594 base = TCG_REG_TMP; 1595 } 1596 tcg_out_ld32_12(s, h.cond, datalo, base, 0); 1597 tcg_out_ld32_12(s, h.cond, datahi, base, 4); 1598 break; 1599 default: 1600 g_assert_not_reached(); 1601 } 1602} 1603 1604static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, 1605 TCGReg addrlo, TCGReg addrhi, 1606 MemOpIdx oi, TCGType data_type) 1607{ 1608 MemOp opc = get_memop(oi); 1609 TCGLabelQemuLdst *ldst; 1610 HostAddress h; 1611 1612 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); 1613 if (ldst) { 1614 ldst->type = data_type; 1615 ldst->datalo_reg = datalo; 1616 ldst->datahi_reg = datahi; 1617 1618 /* 1619 * This a conditional BL only to load a pointer within this 1620 * opcode into LR for the slow path. We will not be using 1621 * the value for a tail call. 1622 */ 1623 ldst->label_ptr[0] = s->code_ptr; 1624 tcg_out_bl_imm(s, COND_NE, 0); 1625 1626 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1627 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1628 } else { 1629 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); 1630 } 1631} 1632 1633static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, 1634 TCGReg datahi, HostAddress h) 1635{ 1636 /* Byte swapping is left to middle-end expansion. */ 1637 tcg_debug_assert((opc & MO_BSWAP) == 0); 1638 1639 switch (opc & MO_SIZE) { 1640 case MO_8: 1641 if (h.index < 0) { 1642 tcg_out_st8_12(s, h.cond, datalo, h.base, 0); 1643 } else { 1644 tcg_out_st8_r(s, h.cond, datalo, h.base, h.index); 1645 } 1646 break; 1647 case MO_16: 1648 if (h.index < 0) { 1649 tcg_out_st16_8(s, h.cond, datalo, h.base, 0); 1650 } else { 1651 tcg_out_st16_r(s, h.cond, datalo, h.base, h.index); 1652 } 1653 break; 1654 case MO_32: 1655 if (h.index < 0) { 1656 tcg_out_st32_12(s, h.cond, datalo, h.base, 0); 1657 } else { 1658 tcg_out_st32_r(s, h.cond, datalo, h.base, h.index); 1659 } 1660 break; 1661 case MO_64: 1662 /* We used pair allocation for datalo, so already should be aligned. */ 1663 tcg_debug_assert((datalo & 1) == 0); 1664 tcg_debug_assert(datahi == datalo + 1); 1665 /* STRD requires alignment; double-check that. 
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        if (h.index < 0) {
            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_16:
        if (h.index < 0) {
            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_32:
        if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_64:
        /* We used pair allocation for datalo, so already should be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* STRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
            } else {
                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
            }
        } else if (h.index_scratch) {
            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        h.cond = COND_EQ;
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);

        /* The conditional call is last, as we're going to return here. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
    }
}

static void tcg_out_epilogue(TCGContext *s);

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
    tcg_out_epilogue(s);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    uintptr_t i_addr;
    intptr_t i_disp;

    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, INSN_NOP);

    /* When branch is out of range, fall through to indirect. */
    i_addr = get_jmp_target_addr(s, which);
    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
    tcg_debug_assert(i_disp < 0);
    if (i_disp >= -0xfff) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
    } else {
        /*
         * The TB is close, but outside the 12 bits addressable by
         * the load.  We can extend this to 20 bits with a sub of a
         * shifted immediate from pc.
         */
        int h = -i_disp;
        int l = h & 0xfff;

        h = encode_imm_nofail(h - l);
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
    }
    set_jmp_reset_offset(s, which);
}
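/*
 * Added note: in the 20-bit case of tcg_out_goto_tb() above, l is the
 * low 12 bits of the negated displacement and h - l is a multiple of
 * 0x1000 that fits the rotated-immediate encoding, so
 * "sub r0, pc, #(h - l)" plus "ldr pc, [r0, #-l]" together reach
 * addresses up to roughly 1MB behind the code.
 */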
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - (jmp_rx + 8);
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextract64(offset, 0, 26)) {
        /* B <addr> */
        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
    } else {
        insn = INSN_NOP;
    }

    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_b_reg(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_negsetcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MVN, args[0], 0, 0);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_a32_i32:
        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
                        args[3], TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
                        args[4], TCG_TYPE_I64);
        break;

    case INDEX_op_qemu_st_a32_i32:
        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
                        args[3], TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
                        args[4], TCG_TYPE_I64);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;
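
    /*
     * The deposit below maps onto the ARMv6T2 bitfield insert: e.g.
     * deposit(dest, dest, val, 8, 8) replaces bits [15:8] of dest with
     * the low 8 bits of val, which tcg_out_deposit can emit as a single
     * bfi (or bfc, when the inserted value is the constant zero).
     */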
    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations against zero should be generic. */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov. */
    case INDEX_op_call:      /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:   /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:   /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    default:
        g_assert_not_reached();
    }
}
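
/*
 * Constraint sets read as C_O<n>_I<m>(outputs..., inputs...).  E.g.
 * C_O1_I2(r, r, rIN) is one output and two inputs, all in core
 * registers, where the second input may also be an ARM-encodable
 * immediate ("I") or the negation of one ("N", letting the emitter
 * flip ADD/SUB or CMP/CMN).  Likewise "K" accepts an inverted
 * immediate (AND/BIC, MOV/MVN), "0" ties an input to output 0, and
 * "Z" additionally accepts the constant zero.
 */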
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_negsetcond_i32:
        return C_O1_I2(r, r, rIN);

    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return C_O1_I2(r, r, rIK);

    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return C_O1_I2(r, r, r);

    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return C_O1_I2(r, r, rI);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
        return C_O0_I2(r, rIN);
    case INDEX_op_deposit_i32:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_extract2_i32:
        return C_O1_I2(r, rZ, rZ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, r, rIN, rIK, 0);
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rIN, rIK);
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rI, rIN, rIK);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, rI, rI);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, rI, rI);

    case INDEX_op_qemu_ld_a32_i32:
        return C_O1_I1(r, q);
    case INDEX_op_qemu_ld_a64_i32:
        return C_O1_I2(r, q, q);
    case INDEX_op_qemu_ld_a32_i64:
        return C_O2_I1(e, p, q);
    case INDEX_op_qemu_ld_a64_i64:
        return C_O2_I2(e, p, q, q);
    case INDEX_op_qemu_st_a32_i32:
        return C_O0_I2(q, q);
    case INDEX_op_qemu_st_a64_i32:
        return C_O0_I3(q, q, q);
    case INDEX_op_qemu_st_a32_i64:
        return C_O0_I3(Q, p, q);
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I4(Q, p, q, q);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_dup2_vec:
    case INDEX_op_add_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_arm_sshl_vec:
    case INDEX_op_arm_ushl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_arm_sli_vec:
        return C_O1_I2(w, 0, w);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wV);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);
    default:
        g_assert_not_reached();
    }
}
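
/*
 * Per the AAPCS, r0-r3, r12 (ip) and r14 (lr) are call-clobbered,
 * while q4-q7 are the only callee-saved NEON registers; that is why
 * the clobber sets built below cover q0-q3 and q8-q15 but skip q4-q7.
 */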
2240 */ 2241#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) 2242 { 2243 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2244#ifndef use_idiv_instructions 2245 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; 2246#endif 2247#ifndef use_neon_instructions 2248 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; 2249#endif 2250 } 2251#endif 2252 2253 if (__ARM_ARCH < 7) { 2254 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); 2255 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { 2256 arm_arch = pl[1] - '0'; 2257 } 2258 2259 if (arm_arch < 6) { 2260 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); 2261 exit(EXIT_FAILURE); 2262 } 2263 } 2264 2265 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2266 2267 tcg_target_call_clobber_regs = 0; 2268 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); 2269 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); 2270 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); 2271 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); 2272 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); 2273 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); 2274 2275 if (use_neon_instructions) { 2276 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2277 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2278 2279 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); 2280 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); 2281 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); 2282 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); 2283 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); 2284 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); 2285 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); 2286 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); 2287 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); 2288 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); 2289 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); 2290 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); 2291 } 2292 2293 s->reserved_regs = 0; 2294 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 2295 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 2296 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); 2297 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); 2298} 2299 2300static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 2301 TCGReg arg1, intptr_t arg2) 2302{ 2303 switch (type) { 2304 case TCG_TYPE_I32: 2305 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); 2306 return; 2307 case TCG_TYPE_V64: 2308 /* regs 1; size 8; align 8 */ 2309 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); 2310 return; 2311 case TCG_TYPE_V128: 2312 /* 2313 * We have only 8-byte alignment for the stack per the ABI. 2314 * Rather than dynamically re-align the stack, it's easier 2315 * to simply not request alignment beyond that. 
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /*
         * We have only 8-byte alignment for the stack per the ABI.
         * Rather than dynamically re-align the stack, it's easier
         * to simply not request alignment beyond that.  So:
         * regs 2; size 8; align 8
         */
        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(type == TCG_TYPE_I32);
    tcg_debug_assert(ret < TCG_REG_Q0);
    tcg_out_movi32(s, COND_AL, ret, arg);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    int enc, opc = ARITH_ADD;

    /* All of the easiest immediates to encode are positive. */
    if (imm < 0) {
        imm = -imm;
        opc = ARITH_SUB;
    }
    enc = encode_imm(imm);
    if (enc >= 0) {
        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
    } else {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
    }
}

/* Type is always V128, with I64 elements. */
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
{
    /* Move high element into place first. */
    /* VMOV Dd+1, Ds */
    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
    /* Move low element into place; tcg_out_mov will check for nop. */
    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
}
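
/*
 * E.g. building q2 (the d4:d5 pair) from lo = d0, hi = d1 emits
 *     vorr d5, d1, d1      @ "| (1 << 12)" bumps the destination to Dd+1
 *     vorr d4, d0, d0      @ skipped by tcg_out_mov when lo is already d4
 * (register names illustrative).
 */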
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int q = type - TCG_TYPE_V64;

    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
        int b = (vece == MO_8);
        int e = (vece == MO_16);
        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
                  encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg base, intptr_t offset)
{
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;

    /* Test all bytes equal first. */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }
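
    /*
     * E.g. v16 = 0x1234 in the fallback above: the VMOVI with cmode 0x8
     * sets every lane to 0x0034, then the ORR form with cmode 0xb
     * (imm8 shifted left by 8) folds in 0x1200, yielding 0x1234.
     */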
    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }

    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}

static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};

static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        return;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        return;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        return;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        return;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        return;
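
    /*
     * For the immediate shifts further below, the NEON imm6 field
     * encodes both element size and count: VSHL/VSLI use
     * (8 << vece) + n while the right shifts use (16 << vece) - n,
     * so for MO_8 a left shift by 3 encodes as 11 and a right shift
     * by 3 as 13.
     */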
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        return;
    case INDEX_op_arm_ushl_vec:
        /* See above. */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        return;
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        return;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        return;

    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        return;

    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        return;

    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (cond == TCG_COND_NE) {
                if (const_args[2]) {
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
            } else {
                ARMInsn insn;

                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        return;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
            }
        }
        return;

    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        return;
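
    /*
     * The bitsel lowering above uses the NEON insert trio, with a1 as
     * the selector throughout:
     *     vbsl d, n, m:  d = (n & d)  | (m & ~d)   -- d is the selector
     *     vbit d, n, m:  d = (n & m)  | (d & ~m)   -- m is the selector
     *     vbif d, n, m:  d = (n & ~m) | (d & m)    -- m is the selector
     * Each output-aliasing case picks the variant that leaves the
     * selector as an unmodified source operand.
     */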
    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;

    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotli_vec:
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;
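
    /*
     * The rotrv expansion below computes x rotr n as
     * (x >> n) | (x << (esize - n)): t1 holds the shift count -n and
     * t2 holds esize - n, so both halves become ushl's OR'ed together.
     */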
    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
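
/*
 * Spelled out: PUSH_SIZE is the nine registers saved by the prologue
 * (r4-r11 plus lr, hence 11 - 4 + 1 + 1) times 4 bytes, i.e. 36.
 * FRAME_SIZE adds the static call-argument and temp-buffer areas and
 * rounds up to TCG_TARGET_STACK_ALIGN; STACK_ADDEND is what the
 * prologue must still subtract from sp after the stmdb has pushed
 * PUSH_SIZE bytes.
 */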
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));

    /* Reserve callee argument and tcg temp space. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }
#endif

    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr.  Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue. */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}