1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "qemu/osdep.h"
22
23 #include "translate.h"
24 #include "translate-a32.h"
25 #include "qemu/log.h"
26 #include "arm_ldst.h"
27 #include "semihosting/semihost.h"
28 #include "cpregs.h"
29 #include "exec/helper-proto.h"
30
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef HELPER_H
34
35 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
36 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
37 /* currently all emulated v5 cores are also v5TE, so don't bother */
38 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
39 #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
40 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
41 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
42 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
43 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
44 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
45
46 /* These are TCG temporaries used only by the legacy iwMMXt decoder */
47 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
48 /* These are TCG globals which alias CPUARMState fields */
49 static TCGv_i32 cpu_R[16];
50 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
51 TCGv_i64 cpu_exclusive_addr;
52 TCGv_i64 cpu_exclusive_val;
53
54 static const char * const regnames[] =
55 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
56 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
57
58
59 /* initialize TCG globals. */
60 void arm_translate_init(void)
61 {
62 int i;
63
64 for (i = 0; i < 16; i++) {
65 cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
66 offsetof(CPUARMState, regs[i]),
67 regnames[i]);
68 }
69 cpu_CF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, CF), "CF");
70 cpu_NF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, NF), "NF");
71 cpu_VF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, VF), "VF");
72 cpu_ZF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, ZF), "ZF");
73
74 cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_env,
75 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
76 cpu_exclusive_val = tcg_global_mem_new_i64(tcg_env,
77 offsetof(CPUARMState, exclusive_val), "exclusive_val");
78
79 a64_translate_init();
80 }
81
82 uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
83 {
84 /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
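    /*
     * Example: cmode == 14 with op == 1 replicates each set bit n of imm
     * into byte n of the result, so imm == 0xa5 expands to 0xff00ff0000ff00ff.
     */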
85 switch (cmode) {
86 case 0: case 1:
87 /* no-op */
88 break;
89 case 2: case 3:
90 imm <<= 8;
91 break;
92 case 4: case 5:
93 imm <<= 16;
94 break;
95 case 6: case 7:
96 imm <<= 24;
97 break;
98 case 8: case 9:
99 imm |= imm << 16;
100 break;
101 case 10: case 11:
102 imm = (imm << 8) | (imm << 24);
103 break;
104 case 12:
105 imm = (imm << 8) | 0xff;
106 break;
107 case 13:
108 imm = (imm << 16) | 0xffff;
109 break;
110 case 14:
111 if (op) {
112 /*
113 * This and cmode == 15 op == 1 are the only cases where
114 * the top and bottom 32 bits of the encoded constant differ.
115 */
116 uint64_t imm64 = 0;
117 int n;
118
119 for (n = 0; n < 8; n++) {
120 if (imm & (1 << n)) {
121 imm64 |= (0xffULL << (n * 8));
122 }
123 }
124 return imm64;
125 }
126 imm |= (imm << 8) | (imm << 16) | (imm << 24);
127 break;
128 case 15:
129 if (op) {
130 /* Reserved encoding for AArch32; valid for AArch64 */
131 uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
132 if (imm & 0x80) {
133 imm64 |= 0x8000000000000000ULL;
134 }
135 if (imm & 0x40) {
136 imm64 |= 0x3fc0000000000000ULL;
137 } else {
138 imm64 |= 0x4000000000000000ULL;
139 }
140 return imm64;
141 }
142 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
143 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
144 break;
145 }
146 if (op) {
147 imm = ~imm;
148 }
149 return dup_const(MO_32, imm);
150 }
151
152 /* Generate a label used for skipping this instruction */
153 void arm_gen_condlabel(DisasContext *s)
154 {
155 if (!s->condjmp) {
156 s->condlabel = gen_disas_label(s);
157 s->condjmp = 1;
158 }
159 }
160
161 /* Flags for the disas_set_da_iss info argument:
162 * lower bits hold the Rt register number, higher bits are flags.
163 */
164 typedef enum ISSInfo {
165 ISSNone = 0,
166 ISSRegMask = 0x1f,
167 ISSInvalid = (1 << 5),
168 ISSIsAcqRel = (1 << 6),
169 ISSIsWrite = (1 << 7),
170 ISSIs16Bit = (1 << 8),
171 } ISSInfo;
172
173 /*
174 * Store var into env + offset to a member with size bytes.
175 * Free var after use.
176 */
177 void store_cpu_offset(TCGv_i32 var, int offset, int size)
178 {
179 switch (size) {
180 case 1:
181 tcg_gen_st8_i32(var, tcg_env, offset);
182 break;
183 case 4:
184 tcg_gen_st_i32(var, tcg_env, offset);
185 break;
186 default:
187 g_assert_not_reached();
188 }
189 }
190
191 /* Save the syndrome information for a Data Abort */
192 static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
193 {
194 uint32_t syn;
195 int sas = memop & MO_SIZE;
196 bool sse = memop & MO_SIGN;
197 bool is_acqrel = issinfo & ISSIsAcqRel;
198 bool is_write = issinfo & ISSIsWrite;
199 bool is_16bit = issinfo & ISSIs16Bit;
200 int srt = issinfo & ISSRegMask;
201
202 if (issinfo & ISSInvalid) {
203 /* Some callsites want to conditionally provide ISS info,
204 * eg "only if this was not a writeback"
205 */
206 return;
207 }
208
209 if (srt == 15) {
210 /* For AArch32, insns where the src/dest is R15 never generate
211 * ISS information. Catching that here saves checking at all
212 * the call sites.
213 */
214 return;
215 }
216
217 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
218 0, 0, 0, is_write, 0, is_16bit);
219 disas_set_insn_syndrome(s, syn);
220 }
221
222 static inline int get_a32_user_mem_index(DisasContext *s)
223 {
224 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
225 * insns:
226 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
227 * otherwise, access as if at PL0.
228 */
229 switch (s->mmu_idx) {
230 case ARMMMUIdx_E3:
231 case ARMMMUIdx_E30_0:
232 case ARMMMUIdx_E30_3_PAN:
233 return arm_to_core_mmu_idx(ARMMMUIdx_E30_0);
234 case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
235 case ARMMMUIdx_E10_0:
236 case ARMMMUIdx_E10_1:
237 case ARMMMUIdx_E10_1_PAN:
238 return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
239 case ARMMMUIdx_MUser:
240 case ARMMMUIdx_MPriv:
241 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
242 case ARMMMUIdx_MUserNegPri:
243 case ARMMMUIdx_MPrivNegPri:
244 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
245 case ARMMMUIdx_MSUser:
246 case ARMMMUIdx_MSPriv:
247 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
248 case ARMMMUIdx_MSUserNegPri:
249 case ARMMMUIdx_MSPrivNegPri:
250 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
251 default:
252 g_assert_not_reached();
253 }
254 }
255
256 /* The pc_curr difference for an architectural jump. */
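/*
 * The architectural PC reads as the address of the current instruction
 * plus 8 in ARM state or plus 4 in Thumb state.
 */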
257 static target_long jmp_diff(DisasContext *s, target_long diff)
258 {
259 return diff + (s->thumb ? 4 : 8);
260 }
261
262 static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
263 {
264 assert(s->pc_save != -1);
265 if (tb_cflags(s->base.tb) & CF_PCREL) {
266 tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
267 } else {
268 tcg_gen_movi_i32(var, s->pc_curr + diff);
269 }
270 }
271
272 /* Set a variable to the value of a CPU register. */
273 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
274 {
275 if (reg == 15) {
276 gen_pc_plus_diff(s, var, jmp_diff(s, 0));
277 } else {
278 tcg_gen_mov_i32(var, cpu_R[reg]);
279 }
280 }
281
282 /*
283 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
284 * This is used for load/store for which use of PC implies (literal),
285 * or ADD that implies ADR.
286 */
287 TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
288 {
289 TCGv_i32 tmp = tcg_temp_new_i32();
290
291 if (reg == 15) {
292 /*
293 * This address is computed from an aligned PC:
294 * subtract off the low bits.
295 */
296 gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3)));
297 } else {
298 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
299 }
300 return tmp;
301 }
302
303 /* Set a CPU register. The source must be a temporary and will be
304 marked as dead. */
305 void store_reg(DisasContext *s, int reg, TCGv_i32 var)
306 {
307 if (reg == 15) {
308 /* In Thumb mode, we must ignore bit 0.
309 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
310 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
311 * We choose to ignore [1:0] in ARM mode for all architecture versions.
312 */
313 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
314 s->base.is_jmp = DISAS_JUMP;
315 s->pc_save = -1;
316 } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
317 /* For M-profile SP bits [1:0] are always zero */
318 tcg_gen_andi_i32(var, var, ~3);
319 }
320 tcg_gen_mov_i32(cpu_R[reg], var);
321 }
322
323 /*
324 * Variant of store_reg which applies v8M stack-limit checks before updating
325 * SP. If the check fails this will result in an exception being taken.
326 * We disable the stack checks for CONFIG_USER_ONLY because we have
327 * no idea what the stack limits should be in that case.
328 * If stack checking is not being done this just acts like store_reg().
329 */
330 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
331 {
332 #ifndef CONFIG_USER_ONLY
333 if (s->v8m_stackcheck) {
334 gen_helper_v8m_stackcheck(tcg_env, var);
335 }
336 #endif
337 store_reg(s, 13, var);
338 }
339
340 /* Value extensions. */
341 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
342 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
343 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
344 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
345
346 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
347 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
348
349 void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
350 {
351 gen_helper_cpsr_write(tcg_env, var, tcg_constant_i32(mask));
352 }
353
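/*
 * Regenerate the cached hflags after a change to CPU state. new_el is
 * true when the change may also have switched exception level, in which
 * case the helper recomputes the current EL itself rather than taking
 * the translation-time value.
 */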
354 static void gen_rebuild_hflags(DisasContext *s, bool new_el)
355 {
356 bool m_profile = arm_dc_feature(s, ARM_FEATURE_M);
357
358 if (new_el) {
359 if (m_profile) {
360 gen_helper_rebuild_hflags_m32_newel(tcg_env);
361 } else {
362 gen_helper_rebuild_hflags_a32_newel(tcg_env);
363 }
364 } else {
365 TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
366 if (m_profile) {
367 gen_helper_rebuild_hflags_m32(tcg_env, tcg_el);
368 } else {
369 gen_helper_rebuild_hflags_a32(tcg_env, tcg_el);
370 }
371 }
372 }
373
374 static void gen_exception_internal(int excp)
375 {
376 assert(excp_is_internal(excp));
377 gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
378 }
379
380 static void gen_singlestep_exception(DisasContext *s)
381 {
382 /* We just completed a step of an insn. Move from Active-not-pending
383 * to Active-pending, and then also take the swstep exception.
384 * This corresponds to making the (IMPDEF) choice to prioritize
385 * swstep exceptions over asynchronous exceptions taken to an exception
386 * level where debug is disabled. This choice has the advantage that
387 * we do not need to maintain internal state corresponding to the
388 * ISV/EX syndrome bits between completion of the step and generation
389 * of the exception, and our syndrome information is always correct.
390 */
391 gen_ss_advance(s);
392 gen_swstep_exception(s, 1, s->is_ldex);
393 s->base.is_jmp = DISAS_NORETURN;
394 }
395
396 void clear_eci_state(DisasContext *s)
397 {
398 /*
399 * Clear any ECI/ICI state: used when a load multiple/store
400 * multiple insn executes.
401 */
402 if (s->eci) {
403 store_cpu_field_constant(0, condexec_bits);
404 s->eci = 0;
405 }
406 }
407
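/*
 * Dual signed 16x16->32 multiply: on return, a holds the product of the
 * low halfwords of the inputs and b holds the product of the high halfwords.
 */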
408 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
409 {
410 TCGv_i32 tmp1 = tcg_temp_new_i32();
411 TCGv_i32 tmp2 = tcg_temp_new_i32();
412 tcg_gen_ext16s_i32(tmp1, a);
413 tcg_gen_ext16s_i32(tmp2, b);
414 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
415 tcg_gen_sari_i32(a, a, 16);
416 tcg_gen_sari_i32(b, b, 16);
417 tcg_gen_mul_i32(b, b, a);
418 tcg_gen_mov_i32(a, tmp1);
419 }
420
421 /* Byteswap each halfword. */
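/* dest = ((var >> 8) & 0x00ff00ff) | ((var & 0x00ff00ff) << 8) */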
422 void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
423 {
424 TCGv_i32 tmp = tcg_temp_new_i32();
425 TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
426 tcg_gen_shri_i32(tmp, var, 8);
427 tcg_gen_and_i32(tmp, tmp, mask);
428 tcg_gen_and_i32(var, var, mask);
429 tcg_gen_shli_i32(var, var, 8);
430 tcg_gen_or_i32(dest, var, tmp);
431 }
432
433 /* Byteswap low halfword and sign extend. */
434 static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
435 {
436 tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
437 }
438
439 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
440 tmp = (t0 ^ t1) & 0x8000;
441 t0 &= ~0x8000;
442 t1 &= ~0x8000;
443 t0 = (t0 + t1) ^ tmp;
444 */
445
446 static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
447 {
448 TCGv_i32 tmp = tcg_temp_new_i32();
449 tcg_gen_xor_i32(tmp, t0, t1);
450 tcg_gen_andi_i32(tmp, tmp, 0x8000);
451 tcg_gen_andi_i32(t0, t0, ~0x8000);
452 tcg_gen_andi_i32(t1, t1, ~0x8000);
453 tcg_gen_add_i32(t0, t0, t1);
454 tcg_gen_xor_i32(dest, t0, tmp);
455 }
456
457 /* Set N and Z flags from var. */
458 static inline void gen_logic_CC(TCGv_i32 var)
459 {
460 tcg_gen_mov_i32(cpu_NF, var);
461 tcg_gen_mov_i32(cpu_ZF, var);
462 }
463
464 /* dest = T0 + T1 + CF. */
465 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
466 {
467 tcg_gen_add_i32(dest, t0, t1);
468 tcg_gen_add_i32(dest, dest, cpu_CF);
469 }
470
471 /* dest = T0 - T1 + CF - 1. */
472 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
473 {
474 tcg_gen_sub_i32(dest, t0, t1);
475 tcg_gen_add_i32(dest, dest, cpu_CF);
476 tcg_gen_subi_i32(dest, dest, 1);
477 }
478
479 /* dest = T0 + T1. Compute C, N, V and Z flags */
480 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
481 {
482 TCGv_i32 tmp = tcg_temp_new_i32();
483 tcg_gen_movi_i32(tmp, 0);
484 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
485 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
486 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
487 tcg_gen_xor_i32(tmp, t0, t1);
488 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
489 tcg_gen_mov_i32(dest, cpu_NF);
490 }
491
492 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
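/*
 * With a 32-bit add2 op in the backend we accumulate the 33-bit sum in
 * the CF:NF pair (CF holding the carry) across two additions; otherwise
 * we widen to 64 bits and split the sum into NF (low half) and CF (carry).
 */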
493 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
494 {
495 TCGv_i32 tmp = tcg_temp_new_i32();
496 if (TCG_TARGET_HAS_add2_i32) {
497 tcg_gen_movi_i32(tmp, 0);
498 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
499 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
500 } else {
501 TCGv_i64 q0 = tcg_temp_new_i64();
502 TCGv_i64 q1 = tcg_temp_new_i64();
503 tcg_gen_extu_i32_i64(q0, t0);
504 tcg_gen_extu_i32_i64(q1, t1);
505 tcg_gen_add_i64(q0, q0, q1);
506 tcg_gen_extu_i32_i64(q1, cpu_CF);
507 tcg_gen_add_i64(q0, q0, q1);
508 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
509 }
510 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
511 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
512 tcg_gen_xor_i32(tmp, t0, t1);
513 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
515 }
516
517 /* dest = T0 - T1. Compute C, N, V and Z flags */
518 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
519 {
520 TCGv_i32 tmp;
521 tcg_gen_sub_i32(cpu_NF, t0, t1);
522 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
523 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
524 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
525 tmp = tcg_temp_new_i32();
526 tcg_gen_xor_i32(tmp, t0, t1);
527 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
528 tcg_gen_mov_i32(dest, cpu_NF);
529 }
530
531 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
532 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
533 {
534 TCGv_i32 tmp = tcg_temp_new_i32();
535 tcg_gen_not_i32(tmp, t1);
536 gen_adc_CC(dest, t0, tmp);
537 }
538
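/*
 * Register-controlled LSL/LSR. The shift amount comes from the bottom
 * byte of t1; shifts of 32 or more produce zero, which the movcond
 * selects whenever any of bits [7:5] of the shift amount are set.
 */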
539 #define GEN_SHIFT(name) \
540 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
541 { \
542 TCGv_i32 tmpd = tcg_temp_new_i32(); \
543 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
544 TCGv_i32 zero = tcg_constant_i32(0); \
545 tcg_gen_andi_i32(tmp1, t1, 0x1f); \
546 tcg_gen_##name##_i32(tmpd, t0, tmp1); \
547 tcg_gen_andi_i32(tmp1, t1, 0xe0); \
548 tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd); \
549 }
550 GEN_SHIFT(shl)
551 GEN_SHIFT(shr)
552 #undef GEN_SHIFT
553
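/*
 * Register-controlled ASR: shift amounts of 32 or more are clamped to 31,
 * which replicates the sign bit across the whole result as required.
 */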
554 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
555 {
556 TCGv_i32 tmp1 = tcg_temp_new_i32();
557
558 tcg_gen_andi_i32(tmp1, t1, 0xff);
559 tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31));
560 tcg_gen_sar_i32(dest, t0, tmp1);
561 }
562
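/* Copy bit 'shift' of var into the carry flag (the shifter carry-out). */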
563 static void shifter_out_im(TCGv_i32 var, int shift)
564 {
565 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
566 }
567
568 /* Shift by immediate. Includes special handling for shift == 0. */
569 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
570 int shift, int flags)
571 {
572 switch (shiftop) {
573 case 0: /* LSL */
574 if (shift != 0) {
575 if (flags)
576 shifter_out_im(var, 32 - shift);
577 tcg_gen_shli_i32(var, var, shift);
578 }
579 break;
580 case 1: /* LSR */
581 if (shift == 0) {
582 if (flags) {
583 tcg_gen_shri_i32(cpu_CF, var, 31);
584 }
585 tcg_gen_movi_i32(var, 0);
586 } else {
587 if (flags)
588 shifter_out_im(var, shift - 1);
589 tcg_gen_shri_i32(var, var, shift);
590 }
591 break;
592 case 2: /* ASR */
593 if (shift == 0)
594 shift = 32;
595 if (flags)
596 shifter_out_im(var, shift - 1);
597 if (shift == 32)
598 shift = 31;
599 tcg_gen_sari_i32(var, var, shift);
600 break;
601 case 3: /* ROR/RRX */
602 if (shift != 0) {
603 if (flags)
604 shifter_out_im(var, shift - 1);
605 tcg_gen_rotri_i32(var, var, shift); break;
606 } else {
607 TCGv_i32 tmp = tcg_temp_new_i32();
608 tcg_gen_shli_i32(tmp, cpu_CF, 31);
609 if (flags)
610 shifter_out_im(var, 0);
611 tcg_gen_shri_i32(var, var, 1);
612 tcg_gen_or_i32(var, var, tmp);
613 }
614 }
615 };
616
617 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
618 TCGv_i32 shift, int flags)
619 {
620 if (flags) {
621 switch (shiftop) {
622 case 0: gen_helper_shl_cc(var, tcg_env, var, shift); break;
623 case 1: gen_helper_shr_cc(var, tcg_env, var, shift); break;
624 case 2: gen_helper_sar_cc(var, tcg_env, var, shift); break;
625 case 3: gen_helper_ror_cc(var, tcg_env, var, shift); break;
626 }
627 } else {
628 switch (shiftop) {
629 case 0:
630 gen_shl(var, var, shift);
631 break;
632 case 1:
633 gen_shr(var, var, shift);
634 break;
635 case 2:
636 gen_sar(var, var, shift);
637 break;
638 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
639 tcg_gen_rotr_i32(var, var, shift); break;
640 }
641 }
642 }
643
644 /*
645 * Generate a conditional based on ARM condition code cc.
646 * This is common between ARM and Aarch64 targets.
647 */
648 void arm_test_cc(DisasCompare *cmp, int cc)
649 {
650 TCGv_i32 value;
651 TCGCond cond;
652
653 switch (cc) {
654 case 0: /* eq: Z */
655 case 1: /* ne: !Z */
656 cond = TCG_COND_EQ;
657 value = cpu_ZF;
658 break;
659
660 case 2: /* cs: C */
661 case 3: /* cc: !C */
662 cond = TCG_COND_NE;
663 value = cpu_CF;
664 break;
665
666 case 4: /* mi: N */
667 case 5: /* pl: !N */
668 cond = TCG_COND_LT;
669 value = cpu_NF;
670 break;
671
672 case 6: /* vs: V */
673 case 7: /* vc: !V */
674 cond = TCG_COND_LT;
675 value = cpu_VF;
676 break;
677
678 case 8: /* hi: C && !Z */
679 case 9: /* ls: !C || Z -> !(C && !Z) */
680 cond = TCG_COND_NE;
681 value = tcg_temp_new_i32();
682 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
683 ZF is non-zero for !Z; so AND the two subexpressions. */
684 tcg_gen_neg_i32(value, cpu_CF);
685 tcg_gen_and_i32(value, value, cpu_ZF);
686 break;
687
688 case 10: /* ge: N == V -> N ^ V == 0 */
689 case 11: /* lt: N != V -> N ^ V != 0 */
690 /* Since we're only interested in the sign bit, == 0 is >= 0. */
691 cond = TCG_COND_GE;
692 value = tcg_temp_new_i32();
693 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
694 break;
695
696 case 12: /* gt: !Z && N == V */
697 case 13: /* le: Z || N != V */
698 cond = TCG_COND_NE;
699 value = tcg_temp_new_i32();
700 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
701 * the sign bit then AND with ZF to yield the result. */
702 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
703 tcg_gen_sari_i32(value, value, 31);
704 tcg_gen_andc_i32(value, cpu_ZF, value);
705 break;
706
707 case 14: /* always */
708 case 15: /* always */
709 /* Use the ALWAYS condition, which will fold early.
710 * It doesn't matter what we use for the value. */
711 cond = TCG_COND_ALWAYS;
712 value = cpu_ZF;
713 goto no_invert;
714
715 default:
716 fprintf(stderr, "Bad condition code 0x%x\n", cc);
717 abort();
718 }
719
720 if (cc & 1) {
721 cond = tcg_invert_cond(cond);
722 }
723
724 no_invert:
725 cmp->cond = cond;
726 cmp->value = value;
727 }
728
729 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
730 {
731 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
732 }
733
734 void arm_gen_test_cc(int cc, TCGLabel *label)
735 {
736 DisasCompare cmp;
737 arm_test_cc(&cmp, cc);
738 arm_jump_cc(&cmp, label);
739 }
740
741 void gen_set_condexec(DisasContext *s)
742 {
743 if (s->condexec_mask) {
744 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
745
746 store_cpu_field_constant(val, condexec_bits);
747 }
748 }
749
750 void gen_update_pc(DisasContext *s, target_long diff)
751 {
752 gen_pc_plus_diff(s, cpu_R[15], diff);
753 s->pc_save = s->pc_curr + diff;
754 }
755
756 /* Set PC and Thumb state from var. var is marked as dead. */
757 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
758 {
759 s->base.is_jmp = DISAS_JUMP;
760 tcg_gen_andi_i32(cpu_R[15], var, ~1);
761 tcg_gen_andi_i32(var, var, 1);
762 store_cpu_field(var, thumb);
763 s->pc_save = -1;
764 }
765
766 /*
767 * Set PC and Thumb state from var. var is marked as dead.
768 * For M-profile CPUs, include logic to detect exception-return
769 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
770 * and BX reg, and no others, and happens only for code in Handler mode.
771 * The Security Extension also requires us to check for the FNC_RETURN
772 * which signals a function return from non-secure state; this can happen
773 * in both Handler and Thread mode.
774 * To avoid having to do multiple comparisons in inline generated code,
775 * we make the check we do here loose, so it will match for EXC_RETURN
776 * in Thread mode. For system emulation do_v7m_exception_exit() checks
777 * for these spurious cases and returns without doing anything (giving
778 * the same behaviour as for a branch to a non-magic address).
779 *
780 * In linux-user mode it is unclear what the right behaviour for an
781 * attempted FNC_RETURN should be, because in real hardware this will go
782 * directly to Secure code (ie not the Linux kernel) which will then treat
783 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
784 * attempt behave the way it would on a CPU without the security extension,
785 * which is to say "like a normal branch". That means we can simply treat
786 * all branches as normal with no magic address behaviour.
787 */
788 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
789 {
790 /* Generate the same code here as for a simple bx, but flag via
791 * s->base.is_jmp that we need to do the rest of the work later.
792 */
793 gen_bx(s, var);
794 #ifndef CONFIG_USER_ONLY
795 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
796 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
797 s->base.is_jmp = DISAS_BX_EXCRET;
798 }
799 #endif
800 }
801
802 static inline void gen_bx_excret_final_code(DisasContext *s)
803 {
804 /* Generate the code to finish possible exception return and end the TB */
805 DisasLabel excret_label = gen_disas_label(s);
806 uint32_t min_magic;
807
808 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
809 /* Covers FNC_RETURN and EXC_RETURN magic */
810 min_magic = FNC_RETURN_MIN_MAGIC;
811 } else {
812 /* EXC_RETURN magic only */
813 min_magic = EXC_RETURN_MIN_MAGIC;
814 }
815
816 /* Is the new PC value in the magic range indicating exception return? */
817 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label);
818 /* No: end the TB as we would for a DISAS_JMP */
819 if (s->ss_active) {
820 gen_singlestep_exception(s);
821 } else {
822 tcg_gen_exit_tb(NULL, 0);
823 }
824 set_disas_label(s, excret_label);
825 /* Yes: this is an exception return.
826 * At this point in runtime env->regs[15] and env->thumb will hold
827 * the exception-return magic number, which do_v7m_exception_exit()
828 * will read. Nothing else will be able to see those values because
829 * the cpu-exec main loop guarantees that we will always go straight
830 * from raising the exception to the exception-handling code.
831 *
832 * gen_ss_advance(s) does nothing on M profile currently but
833 * calling it is conceptually the right thing as we have executed
834 * this instruction (compare SWI, HVC, SMC handling).
835 */
836 gen_ss_advance(s);
837 gen_exception_internal(EXCP_EXCEPTION_EXIT);
838 }
839
840 static inline void gen_bxns(DisasContext *s, int rm)
841 {
842 TCGv_i32 var = load_reg(s, rm);
843
844 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
845 * we need to sync state before calling it, but:
846 * - we don't need to do gen_update_pc() because the bxns helper will
847 * always set the PC itself
848 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
849 * unless it's outside an IT block or the last insn in an IT block,
850 * so we know that condexec == 0 (already set at the top of the TB)
851 * is correct in the non-UNPREDICTABLE cases, and we can choose
852 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
853 */
854 gen_helper_v7m_bxns(tcg_env, var);
855 s->base.is_jmp = DISAS_EXIT;
856 }
857
858 static inline void gen_blxns(DisasContext *s, int rm)
859 {
860 TCGv_i32 var = load_reg(s, rm);
861
862 /* We don't need to sync condexec state, for the same reason as bxns.
863 * We do however need to set the PC, because the blxns helper reads it.
864 * The blxns helper may throw an exception.
865 */
866 gen_update_pc(s, curr_insn_len(s));
867 gen_helper_v7m_blxns(tcg_env, var);
868 s->base.is_jmp = DISAS_EXIT;
869 }
870
871 /* Variant of store_reg which uses branch&exchange logic when storing
872 to r15 in ARM architecture v7 and above. The source must be a temporary
873 and will be marked as dead. */
874 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
875 {
876 if (reg == 15 && ENABLE_ARCH_7) {
877 gen_bx(s, var);
878 } else {
879 store_reg(s, reg, var);
880 }
881 }
882
883 /* Variant of store_reg which uses branch&exchange logic when storing
884 * to r15 in ARM architecture v5T and above. This is used for storing
885 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
886 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
887 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
888 {
889 if (reg == 15 && ENABLE_ARCH_5) {
890 gen_bx_excret(s, var);
891 } else {
892 store_reg(s, reg, var);
893 }
894 }
895
896 #ifdef CONFIG_USER_ONLY
897 #define IS_USER_ONLY 1
898 #else
899 #define IS_USER_ONLY 0
900 #endif
901
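/* Map log2 of a required alignment in bytes to the MO_ALIGN_* MemOp flag. */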
902 MemOp pow2_align(unsigned i)
903 {
904 static const MemOp mop_align[] = {
905 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_32
906 };
907 g_assert(i < ARRAY_SIZE(mop_align));
908 return mop_align[i];
909 }
910
911 /*
912 * Abstractions of "generate code to do a guest load/store for
913 * AArch32", where a vaddr is always 32 bits (and is zero
914 * extended if we're a 64 bit core) and data is also
915 * 32 bits unless specifically doing a 64 bit access.
916 * These functions work like tcg_gen_qemu_{ld,st}* except
917 * that the address argument is TCGv_i32 rather than TCGv.
918 */
919
920 static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
921 {
922 TCGv addr = tcg_temp_new();
923 tcg_gen_extu_i32_tl(addr, a32);
924
925 /* Not needed for user-mode BE32, where we use MO_BE instead. */
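    /*
     * BE32 (SCTLR.B) is word-invariant big-endian: model byte and
     * halfword accesses by XOR-ing the low address bits within the word.
     */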
926 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
927 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
928 }
929 return addr;
930 }
931
932 /*
933 * Internal routines are used for NEON cases where the endianness
934 * and/or alignment has already been taken into account and manipulated.
935 */
936 void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
937 TCGv_i32 a32, int index, MemOp opc)
938 {
939 TCGv addr = gen_aa32_addr(s, a32, opc);
940 tcg_gen_qemu_ld_i32(val, addr, index, opc);
941 }
942
943 void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
944 TCGv_i32 a32, int index, MemOp opc)
945 {
946 TCGv addr = gen_aa32_addr(s, a32, opc);
947 tcg_gen_qemu_st_i32(val, addr, index, opc);
948 }
949
950 void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
951 TCGv_i32 a32, int index, MemOp opc)
952 {
953 TCGv addr = gen_aa32_addr(s, a32, opc);
954
955 tcg_gen_qemu_ld_i64(val, addr, index, opc);
956
957 /* Not needed for user-mode BE32, where we use MO_BE instead. */
958 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
959 tcg_gen_rotri_i64(val, val, 32);
960 }
961 }
962
963 void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
964 TCGv_i32 a32, int index, MemOp opc)
965 {
966 TCGv addr = gen_aa32_addr(s, a32, opc);
967
968 /* Not needed for user-mode BE32, where we use MO_BE instead. */
969 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
970 TCGv_i64 tmp = tcg_temp_new_i64();
971 tcg_gen_rotri_i64(tmp, val, 32);
972 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
973 } else {
974 tcg_gen_qemu_st_i64(val, addr, index, opc);
975 }
976 }
977
978 void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
979 int index, MemOp opc)
980 {
981 gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
982 }
983
984 void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
985 int index, MemOp opc)
986 {
987 gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
988 }
989
990 void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
991 int index, MemOp opc)
992 {
993 gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
994 }
995
996 void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
997 int index, MemOp opc)
998 {
999 gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
1000 }
1001
1002 #define DO_GEN_LD(SUFF, OPC) \
1003 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1004 TCGv_i32 a32, int index) \
1005 { \
1006 gen_aa32_ld_i32(s, val, a32, index, OPC); \
1007 }
1008
1009 #define DO_GEN_ST(SUFF, OPC) \
1010 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1011 TCGv_i32 a32, int index) \
1012 { \
1013 gen_aa32_st_i32(s, val, a32, index, OPC); \
1014 }
1015
1016 static inline void gen_hvc(DisasContext *s, int imm16)
1017 {
1018 /* The pre HVC helper handles cases when HVC gets trapped
1019 * as an undefined insn by runtime configuration (ie before
1020 * the insn really executes).
1021 */
1022 gen_update_pc(s, 0);
1023 gen_helper_pre_hvc(tcg_env);
1024 /* Otherwise we will treat this as a real exception which
1025 * happens after execution of the insn. (The distinction matters
1026 * for the PC value reported to the exception handler and also
1027 * for single stepping.)
1028 */
1029 s->svc_imm = imm16;
1030 gen_update_pc(s, curr_insn_len(s));
1031 s->base.is_jmp = DISAS_HVC;
1032 }
1033
1034 static inline void gen_smc(DisasContext *s)
1035 {
1036 /* As with HVC, we may take an exception either before or after
1037 * the insn executes.
1038 */
1039 gen_update_pc(s, 0);
1040 gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa32_smc()));
1041 gen_update_pc(s, curr_insn_len(s));
1042 s->base.is_jmp = DISAS_SMC;
1043 }
1044
1045 static void gen_exception_internal_insn(DisasContext *s, int excp)
1046 {
1047 gen_set_condexec(s);
1048 gen_update_pc(s, 0);
1049 gen_exception_internal(excp);
1050 s->base.is_jmp = DISAS_NORETURN;
1051 }
1052
1053 static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el)
1054 {
1055 gen_helper_exception_with_syndrome_el(tcg_env, tcg_constant_i32(excp),
1056 tcg_constant_i32(syndrome), tcg_el);
1057 }
1058
1059 static void gen_exception_el(int excp, uint32_t syndrome, uint32_t target_el)
1060 {
1061 gen_exception_el_v(excp, syndrome, tcg_constant_i32(target_el));
1062 }
1063
1064 static void gen_exception(int excp, uint32_t syndrome)
1065 {
1066 gen_helper_exception_with_syndrome(tcg_env, tcg_constant_i32(excp),
1067 tcg_constant_i32(syndrome));
1068 }
1069
1070 static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff,
1071 int excp, uint32_t syn, TCGv_i32 tcg_el)
1072 {
1073 if (s->aarch64) {
1074 gen_a64_update_pc(s, pc_diff);
1075 } else {
1076 gen_set_condexec(s);
1077 gen_update_pc(s, pc_diff);
1078 }
1079 gen_exception_el_v(excp, syn, tcg_el);
1080 s->base.is_jmp = DISAS_NORETURN;
1081 }
1082
1083 void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
1084 uint32_t syn, uint32_t target_el)
1085 {
1086 gen_exception_insn_el_v(s, pc_diff, excp, syn,
1087 tcg_constant_i32(target_el));
1088 }
1089
1090 void gen_exception_insn(DisasContext *s, target_long pc_diff,
1091 int excp, uint32_t syn)
1092 {
1093 if (s->aarch64) {
1094 gen_a64_update_pc(s, pc_diff);
1095 } else {
1096 gen_set_condexec(s);
1097 gen_update_pc(s, pc_diff);
1098 }
1099 gen_exception(excp, syn);
1100 s->base.is_jmp = DISAS_NORETURN;
1101 }
1102
1103 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1104 {
1105 gen_set_condexec(s);
1106 gen_update_pc(s, 0);
1107 gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syn));
1108 s->base.is_jmp = DISAS_NORETURN;
1109 }
1110
1111 void unallocated_encoding(DisasContext *s)
1112 {
1113 /* Unallocated and reserved encodings are uncategorized */
1114 gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
1115 }
1116
1117 /* Force a TB lookup after an instruction that changes the CPU state. */
1118 void gen_lookup_tb(DisasContext *s)
1119 {
1120 gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s));
1121 s->base.is_jmp = DISAS_EXIT;
1122 }
1123
1124 static inline void gen_hlt(DisasContext *s, int imm)
1125 {
1126 /* HLT. This has two purposes.
1127 * Architecturally, it is an external halting debug instruction.
1128 * Since QEMU doesn't implement external debug, we treat this as
1129      * the architecture requires when halting debug is disabled: it will UNDEF.
1130 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1131 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1132 * must trigger semihosting even for ARMv7 and earlier, where
1133 * HLT was an undefined encoding.
1134 * In system mode, we don't allow userspace access to
1135 * semihosting, to provide some semblance of security
1136 * (and for consistency with our 32-bit semihosting).
1137 */
1138 if (semihosting_enabled(s->current_el == 0) &&
1139 (imm == (s->thumb ? 0x3c : 0xf000))) {
1140 gen_exception_internal_insn(s, EXCP_SEMIHOST);
1141 return;
1142 }
1143
1144 unallocated_encoding(s);
1145 }
1146
1147 /*
1148 * Return the offset of a "full" NEON Dreg.
1149 */
1150 long neon_full_reg_offset(unsigned reg)
1151 {
1152 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1153 }
1154
1155 /*
1156 * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1157 * where 0 is the least significant end of the register.
1158 */
1159 long neon_element_offset(int reg, int element, MemOp memop)
1160 {
1161 int element_size = 1 << (memop & MO_SIZE);
1162 int ofs = element * element_size;
1163 #if HOST_BIG_ENDIAN
1164 /*
1165 * Calculate the offset assuming fully little-endian,
1166 * then XOR to account for the order of the 8-byte units.
1167 */
1168 if (element_size < 8) {
1169 ofs ^= 8 - element_size;
1170 }
1171 #endif
1172 return neon_full_reg_offset(reg) + ofs;
1173 }
1174
1175 /* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
1176 long vfp_reg_offset(bool dp, unsigned reg)
1177 {
1178 if (dp) {
1179 return neon_element_offset(reg, 0, MO_64);
1180 } else {
1181 return neon_element_offset(reg >> 1, reg & 1, MO_32);
1182 }
1183 }
1184
1185 void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
1186 {
1187 long off = neon_element_offset(reg, ele, memop);
1188
1189 switch (memop) {
1190 case MO_SB:
1191 tcg_gen_ld8s_i32(dest, tcg_env, off);
1192 break;
1193 case MO_UB:
1194 tcg_gen_ld8u_i32(dest, tcg_env, off);
1195 break;
1196 case MO_SW:
1197 tcg_gen_ld16s_i32(dest, tcg_env, off);
1198 break;
1199 case MO_UW:
1200 tcg_gen_ld16u_i32(dest, tcg_env, off);
1201 break;
1202 case MO_UL:
1203 case MO_SL:
1204 tcg_gen_ld_i32(dest, tcg_env, off);
1205 break;
1206 default:
1207 g_assert_not_reached();
1208 }
1209 }
1210
1211 void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
1212 {
1213 long off = neon_element_offset(reg, ele, memop);
1214
1215 switch (memop) {
1216 case MO_SL:
1217 tcg_gen_ld32s_i64(dest, tcg_env, off);
1218 break;
1219 case MO_UL:
1220 tcg_gen_ld32u_i64(dest, tcg_env, off);
1221 break;
1222 case MO_UQ:
1223 tcg_gen_ld_i64(dest, tcg_env, off);
1224 break;
1225 default:
1226 g_assert_not_reached();
1227 }
1228 }
1229
1230 void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
1231 {
1232 long off = neon_element_offset(reg, ele, memop);
1233
1234 switch (memop) {
1235 case MO_8:
1236 tcg_gen_st8_i32(src, tcg_env, off);
1237 break;
1238 case MO_16:
1239 tcg_gen_st16_i32(src, tcg_env, off);
1240 break;
1241 case MO_32:
1242 tcg_gen_st_i32(src, tcg_env, off);
1243 break;
1244 default:
1245 g_assert_not_reached();
1246 }
1247 }
1248
1249 void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
1250 {
1251 long off = neon_element_offset(reg, ele, memop);
1252
1253 switch (memop) {
1254 case MO_32:
1255 tcg_gen_st32_i64(src, tcg_env, off);
1256 break;
1257 case MO_64:
1258 tcg_gen_st_i64(src, tcg_env, off);
1259 break;
1260 default:
1261 g_assert_not_reached();
1262 }
1263 }
1264
1265 #define ARM_CP_RW_BIT (1 << 20)
1266
1267 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1268 {
1269 tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1270 }
1271
1272 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1273 {
1274 tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1275 }
1276
1277 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1278 {
1279 TCGv_i32 var = tcg_temp_new_i32();
1280 tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1281 return var;
1282 }
1283
1284 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1285 {
1286 tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1287 }
1288
1289 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1290 {
1291 iwmmxt_store_reg(cpu_M0, rn);
1292 }
1293
1294 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1295 {
1296 iwmmxt_load_reg(cpu_M0, rn);
1297 }
1298
1299 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1300 {
1301 iwmmxt_load_reg(cpu_V1, rn);
1302 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1303 }
1304
1305 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1306 {
1307 iwmmxt_load_reg(cpu_V1, rn);
1308 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1309 }
1310
1311 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1312 {
1313 iwmmxt_load_reg(cpu_V1, rn);
1314 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1315 }
1316
1317 #define IWMMXT_OP(name) \
1318 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1319 { \
1320 iwmmxt_load_reg(cpu_V1, rn); \
1321 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1322 }
1323
1324 #define IWMMXT_OP_ENV(name) \
1325 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1326 { \
1327 iwmmxt_load_reg(cpu_V1, rn); \
1328 gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \
1329 }
1330
1331 #define IWMMXT_OP_ENV_SIZE(name) \
1332 IWMMXT_OP_ENV(name##b) \
1333 IWMMXT_OP_ENV(name##w) \
1334 IWMMXT_OP_ENV(name##l)
1335
1336 #define IWMMXT_OP_ENV1(name) \
1337 static inline void gen_op_iwmmxt_##name##_M0(void) \
1338 { \
1339 gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \
1340 }
1341
1342 IWMMXT_OP(maddsq)
1343 IWMMXT_OP(madduq)
1344 IWMMXT_OP(sadb)
1345 IWMMXT_OP(sadw)
1346 IWMMXT_OP(mulslw)
1347 IWMMXT_OP(mulshw)
1348 IWMMXT_OP(mululw)
1349 IWMMXT_OP(muluhw)
1350 IWMMXT_OP(macsw)
1351 IWMMXT_OP(macuw)
1352
1353 IWMMXT_OP_ENV_SIZE(unpackl)
1354 IWMMXT_OP_ENV_SIZE(unpackh)
1355
1356 IWMMXT_OP_ENV1(unpacklub)
1357 IWMMXT_OP_ENV1(unpackluw)
1358 IWMMXT_OP_ENV1(unpacklul)
1359 IWMMXT_OP_ENV1(unpackhub)
1360 IWMMXT_OP_ENV1(unpackhuw)
1361 IWMMXT_OP_ENV1(unpackhul)
1362 IWMMXT_OP_ENV1(unpacklsb)
1363 IWMMXT_OP_ENV1(unpacklsw)
1364 IWMMXT_OP_ENV1(unpacklsl)
1365 IWMMXT_OP_ENV1(unpackhsb)
1366 IWMMXT_OP_ENV1(unpackhsw)
1367 IWMMXT_OP_ENV1(unpackhsl)
1368
1369 IWMMXT_OP_ENV_SIZE(cmpeq)
1370 IWMMXT_OP_ENV_SIZE(cmpgtu)
1371 IWMMXT_OP_ENV_SIZE(cmpgts)
1372
1373 IWMMXT_OP_ENV_SIZE(mins)
1374 IWMMXT_OP_ENV_SIZE(minu)
1375 IWMMXT_OP_ENV_SIZE(maxs)
1376 IWMMXT_OP_ENV_SIZE(maxu)
1377
1378 IWMMXT_OP_ENV_SIZE(subn)
1379 IWMMXT_OP_ENV_SIZE(addn)
1380 IWMMXT_OP_ENV_SIZE(subu)
1381 IWMMXT_OP_ENV_SIZE(addu)
1382 IWMMXT_OP_ENV_SIZE(subs)
1383 IWMMXT_OP_ENV_SIZE(adds)
1384
1385 IWMMXT_OP_ENV(avgb0)
1386 IWMMXT_OP_ENV(avgb1)
1387 IWMMXT_OP_ENV(avgw0)
1388 IWMMXT_OP_ENV(avgw1)
1389
1390 IWMMXT_OP_ENV(packuw)
1391 IWMMXT_OP_ENV(packul)
1392 IWMMXT_OP_ENV(packuq)
1393 IWMMXT_OP_ENV(packsw)
1394 IWMMXT_OP_ENV(packsl)
1395 IWMMXT_OP_ENV(packsq)
1396
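/*
 * wCon bit 1 (MUP) records an update to the wMMX data registers, and
 * bit 0 (CUP) records an update to the control/status registers.
 */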
1397 static void gen_op_iwmmxt_set_mup(void)
1398 {
1399 TCGv_i32 tmp;
1400 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1401 tcg_gen_ori_i32(tmp, tmp, 2);
1402 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1403 }
1404
1405 static void gen_op_iwmmxt_set_cup(void)
1406 {
1407 TCGv_i32 tmp;
1408 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1409 tcg_gen_ori_i32(tmp, tmp, 1);
1410 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1411 }
1412
1413 static void gen_op_iwmmxt_setpsr_nz(void)
1414 {
1415 TCGv_i32 tmp = tcg_temp_new_i32();
1416 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1417 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1418 }
1419
1420 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1421 {
1422 iwmmxt_load_reg(cpu_V1, rn);
1423 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1424 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1425 }
1426
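/*
 * Compute the effective address for an iwMMXt load/store into dest,
 * applying any pre- or post-indexed writeback to the base register.
 * Returns nonzero if the addressing mode is reserved.
 */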
1427 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1428 TCGv_i32 dest)
1429 {
1430 int rd;
1431 uint32_t offset;
1432 TCGv_i32 tmp;
1433
1434 rd = (insn >> 16) & 0xf;
1435 tmp = load_reg(s, rd);
1436
1437 offset = (insn & 0xff) << ((insn >> 7) & 2);
1438 if (insn & (1 << 24)) {
1439 /* Pre indexed */
1440 if (insn & (1 << 23))
1441 tcg_gen_addi_i32(tmp, tmp, offset);
1442 else
1443 tcg_gen_addi_i32(tmp, tmp, -offset);
1444 tcg_gen_mov_i32(dest, tmp);
1445 if (insn & (1 << 21)) {
1446 store_reg(s, rd, tmp);
1447 }
1448 } else if (insn & (1 << 21)) {
1449 /* Post indexed */
1450 tcg_gen_mov_i32(dest, tmp);
1451 if (insn & (1 << 23))
1452 tcg_gen_addi_i32(tmp, tmp, offset);
1453 else
1454 tcg_gen_addi_i32(tmp, tmp, -offset);
1455 store_reg(s, rd, tmp);
1456 } else if (!(insn & (1 << 23)))
1457 return 1;
1458 return 0;
1459 }
1460
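/*
 * Fetch the shift amount for an iwMMXt shift insn into dest: either from
 * one of the wCGR control registers or from the low 32 bits of wMMX
 * register wRd, masked with 'mask'. Returns nonzero on a reserved
 * register selection.
 */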
1461 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1462 {
1463 int rd = (insn >> 0) & 0xf;
1464 TCGv_i32 tmp;
1465
1466 if (insn & (1 << 8)) {
1467 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1468 return 1;
1469 } else {
1470 tmp = iwmmxt_load_creg(rd);
1471 }
1472 } else {
1473 tmp = tcg_temp_new_i32();
1474 iwmmxt_load_reg(cpu_V0, rd);
1475 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1476 }
1477 tcg_gen_andi_i32(tmp, tmp, mask);
1478 tcg_gen_mov_i32(dest, tmp);
1479 return 0;
1480 }
1481
1482 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1483 (ie. an undefined instruction). */
1484 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1485 {
1486 int rd, wrd;
1487 int rdhi, rdlo, rd0, rd1, i;
1488 TCGv_i32 addr;
1489 TCGv_i32 tmp, tmp2, tmp3;
1490
1491 if ((insn & 0x0e000e00) == 0x0c000000) {
1492 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1493 wrd = insn & 0xf;
1494 rdlo = (insn >> 12) & 0xf;
1495 rdhi = (insn >> 16) & 0xf;
1496 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1497 iwmmxt_load_reg(cpu_V0, wrd);
1498 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1499 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
1500 } else { /* TMCRR */
1501 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1502 iwmmxt_store_reg(cpu_V0, wrd);
1503 gen_op_iwmmxt_set_mup();
1504 }
1505 return 0;
1506 }
1507
1508 wrd = (insn >> 12) & 0xf;
1509 addr = tcg_temp_new_i32();
1510 if (gen_iwmmxt_address(s, insn, addr)) {
1511 return 1;
1512 }
1513 if (insn & ARM_CP_RW_BIT) {
1514 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1515 tmp = tcg_temp_new_i32();
1516 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1517 iwmmxt_store_creg(wrd, tmp);
1518 } else {
1519 i = 1;
1520 if (insn & (1 << 8)) {
1521 if (insn & (1 << 22)) { /* WLDRD */
1522 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1523 i = 0;
1524 } else { /* WLDRW wRd */
1525 tmp = tcg_temp_new_i32();
1526 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1527 }
1528 } else {
1529 tmp = tcg_temp_new_i32();
1530 if (insn & (1 << 22)) { /* WLDRH */
1531 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1532 } else { /* WLDRB */
1533 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1534 }
1535 }
1536 if (i) {
1537 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1538 }
1539 gen_op_iwmmxt_movq_wRn_M0(wrd);
1540 }
1541 } else {
1542 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1543 tmp = iwmmxt_load_creg(wrd);
1544 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1545 } else {
1546 gen_op_iwmmxt_movq_M0_wRn(wrd);
1547 tmp = tcg_temp_new_i32();
1548 if (insn & (1 << 8)) {
1549 if (insn & (1 << 22)) { /* WSTRD */
1550 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1553 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1554 }
1555 } else {
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1558 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1559 } else { /* WSTRB */
1560 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1561 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1562 }
1563 }
1564 }
1565 }
1566 return 0;
1567 }
1568
1569 if ((insn & 0x0f000000) != 0x0e000000)
1570 return 1;
1571
1572 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 0) & 0xf;
1576 rd1 = (insn >> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1583 break;
1584 case 0x011: /* TMCR */
1585 if (insn & 0xf)
1586 return 1;
1587 rd = (insn >> 12) & 0xf;
1588 wrd = (insn >> 16) & 0xf;
1589 switch (wrd) {
1590 case ARM_IWMMXT_wCID:
1591 case ARM_IWMMXT_wCASF:
1592 break;
1593 case ARM_IWMMXT_wCon:
1594 gen_op_iwmmxt_set_cup();
1595 /* Fall through. */
1596 case ARM_IWMMXT_wCSSF:
1597 tmp = iwmmxt_load_creg(wrd);
1598 tmp2 = load_reg(s, rd);
1599 tcg_gen_andc_i32(tmp, tmp, tmp2);
1600 iwmmxt_store_creg(wrd, tmp);
1601 break;
1602 case ARM_IWMMXT_wCGR0:
1603 case ARM_IWMMXT_wCGR1:
1604 case ARM_IWMMXT_wCGR2:
1605 case ARM_IWMMXT_wCGR3:
1606 gen_op_iwmmxt_set_cup();
1607 tmp = load_reg(s, rd);
1608 iwmmxt_store_creg(wrd, tmp);
1609 break;
1610 default:
1611 return 1;
1612 }
1613 break;
1614 case 0x100: /* WXOR */
1615 wrd = (insn >> 12) & 0xf;
1616 rd0 = (insn >> 0) & 0xf;
1617 rd1 = (insn >> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1624 break;
1625 case 0x111: /* TMRC */
1626 if (insn & 0xf)
1627 return 1;
1628 rd = (insn >> 12) & 0xf;
1629 wrd = (insn >> 16) & 0xf;
1630 tmp = iwmmxt_load_creg(wrd);
1631 store_reg(s, rd, tmp);
1632 break;
1633 case 0x300: /* WANDN */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
1638 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1639 gen_op_iwmmxt_andq_M0_wRn(rd1);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1644 break;
1645 case 0x200: /* WAND */
1646 wrd = (insn >> 12) & 0xf;
1647 rd0 = (insn >> 0) & 0xf;
1648 rd1 = (insn >> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1655 break;
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 0) & 0xf;
1659 rd1 = (insn >> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 if (insn & (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1663 else
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1667 break;
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 switch ((insn >> 22) & 3) {
1696 case 0:
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1698 break;
1699 case 1:
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1701 break;
1702 case 2:
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1704 break;
1705 case 3:
1706 return 1;
1707 }
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1711 break;
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
1717 if (insn & (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1719 else
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1721 if (!(insn & (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
1731 if (insn & (1 << 21)) {
1732 if (insn & (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1734 else
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1736 } else {
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1739 else
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1741 }
1742 gen_op_iwmmxt_movq_wRn_M0(wrd);
1743 gen_op_iwmmxt_set_mup();
1744 break;
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
1750 if (insn & (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1752 else
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1754 if (!(insn & (1 << 20))) {
1755 iwmmxt_load_reg(cpu_V1, wrd);
1756 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1757 }
1758 gen_op_iwmmxt_movq_wRn_M0(wrd);
1759 gen_op_iwmmxt_set_mup();
1760 break;
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
1766 switch ((insn >> 22) & 3) {
1767 case 0:
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1769 break;
1770 case 1:
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1772 break;
1773 case 2:
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1775 break;
1776 case 3:
1777 return 1;
1778 }
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1782 break;
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd = (insn >> 12) & 0xf;
1785 rd0 = (insn >> 16) & 0xf;
1786 rd1 = (insn >> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0);
1788 if (insn & (1 << 22)) {
1789 if (insn & (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1791 else
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1793 } else {
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1796 else
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1798 }
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1802 break;
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 rd1 = (insn >> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1809 tcg_gen_andi_i32(tmp, tmp, 7);
1810 iwmmxt_load_reg(cpu_V1, rd1);
1811 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd);
1813 gen_op_iwmmxt_set_mup();
1814 break;
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 if (((insn >> 6) & 3) == 3)
1817 return 1;
1818 rd = (insn >> 12) & 0xf;
1819 wrd = (insn >> 16) & 0xf;
1820 tmp = load_reg(s, rd);
1821 gen_op_iwmmxt_movq_M0_wRn(wrd);
1822 switch ((insn >> 6) & 3) {
1823 case 0:
1824 tmp2 = tcg_constant_i32(0xff);
1825 tmp3 = tcg_constant_i32((insn & 7) << 3);
1826 break;
1827 case 1:
1828 tmp2 = tcg_constant_i32(0xffff);
1829 tmp3 = tcg_constant_i32((insn & 3) << 4);
1830 break;
1831 case 2:
1832 tmp2 = tcg_constant_i32(0xffffffff);
1833 tmp3 = tcg_constant_i32((insn & 1) << 5);
1834 break;
1835 default:
1836 g_assert_not_reached();
1837 }
1838 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1839 gen_op_iwmmxt_movq_wRn_M0(wrd);
1840 gen_op_iwmmxt_set_mup();
1841 break;
1842 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1843 rd = (insn >> 12) & 0xf;
1844 wrd = (insn >> 16) & 0xf;
1845 if (rd == 15 || ((insn >> 22) & 3) == 3)
1846 return 1;
1847 gen_op_iwmmxt_movq_M0_wRn(wrd);
1848 tmp = tcg_temp_new_i32();
1849 switch ((insn >> 22) & 3) {
1850 case 0:
1851 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1852 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1853 if (insn & 8) {
1854 tcg_gen_ext8s_i32(tmp, tmp);
1855 } else {
1856 tcg_gen_andi_i32(tmp, tmp, 0xff);
1857 }
1858 break;
1859 case 1:
1860 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1861 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1862 if (insn & 8) {
1863 tcg_gen_ext16s_i32(tmp, tmp);
1864 } else {
1865 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1866 }
1867 break;
1868 case 2:
1869 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1870 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1871 break;
1872 }
1873 store_reg(s, rd, tmp);
1874 break;
1875 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1876 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1877 return 1;
1878 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1879 switch ((insn >> 22) & 3) {
1880 case 0:
1881 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1882 break;
1883 case 1:
1884 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1885 break;
1886 case 2:
1887 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1888 break;
1889 }
1890 tcg_gen_shli_i32(tmp, tmp, 28);
1891 gen_set_nzcv(tmp);
1892 break;
1893 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1894 if (((insn >> 6) & 3) == 3)
1895 return 1;
1896 rd = (insn >> 12) & 0xf;
1897 wrd = (insn >> 16) & 0xf;
1898 tmp = load_reg(s, rd);
1899 switch ((insn >> 6) & 3) {
1900 case 0:
1901 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1902 break;
1903 case 1:
1904 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1905 break;
1906 case 2:
1907 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1908 break;
1909 }
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 break;
1913 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1914 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1915 return 1;
1916 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1917 tmp2 = tcg_temp_new_i32();
1918 tcg_gen_mov_i32(tmp2, tmp);
1919 switch ((insn >> 22) & 3) {
1920 case 0:
1921 for (i = 0; i < 7; i ++) {
1922 tcg_gen_shli_i32(tmp2, tmp2, 4);
1923 tcg_gen_and_i32(tmp, tmp, tmp2);
1924 }
1925 break;
1926 case 1:
1927 for (i = 0; i < 3; i ++) {
1928 tcg_gen_shli_i32(tmp2, tmp2, 8);
1929 tcg_gen_and_i32(tmp, tmp, tmp2);
1930 }
1931 break;
1932 case 2:
1933 tcg_gen_shli_i32(tmp2, tmp2, 16);
1934 tcg_gen_and_i32(tmp, tmp, tmp2);
1935 break;
1936 }
1937 gen_set_nzcv(tmp);
1938 break;
1939 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1946 break;
1947 case 1:
1948 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1949 break;
1950 case 2:
1951 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1952 break;
1953 case 3:
1954 return 1;
1955 }
1956 gen_op_iwmmxt_movq_wRn_M0(wrd);
1957 gen_op_iwmmxt_set_mup();
1958 break;
1959 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1960 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1961 return 1;
1962 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1963 tmp2 = tcg_temp_new_i32();
1964 tcg_gen_mov_i32(tmp2, tmp);
1965 switch ((insn >> 22) & 3) {
1966 case 0:
1967 for (i = 0; i < 7; i ++) {
1968 tcg_gen_shli_i32(tmp2, tmp2, 4);
1969 tcg_gen_or_i32(tmp, tmp, tmp2);
1970 }
1971 break;
1972 case 1:
1973 for (i = 0; i < 3; i ++) {
1974 tcg_gen_shli_i32(tmp2, tmp2, 8);
1975 tcg_gen_or_i32(tmp, tmp, tmp2);
1976 }
1977 break;
1978 case 2:
1979 tcg_gen_shli_i32(tmp2, tmp2, 16);
1980 tcg_gen_or_i32(tmp, tmp, tmp2);
1981 break;
1982 }
1983 gen_set_nzcv(tmp);
1984 break;
1985 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1986 rd = (insn >> 12) & 0xf;
1987 rd0 = (insn >> 16) & 0xf;
1988 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1989 return 1;
1990 gen_op_iwmmxt_movq_M0_wRn(rd0);
1991 tmp = tcg_temp_new_i32();
1992 switch ((insn >> 22) & 3) {
1993 case 0:
1994 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1995 break;
1996 case 1:
1997 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1998 break;
1999 case 2:
2000 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2001 break;
2002 }
2003 store_reg(s, rd, tmp);
2004 break;
2005 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2006 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2007 wrd = (insn >> 12) & 0xf;
2008 rd0 = (insn >> 16) & 0xf;
2009 rd1 = (insn >> 0) & 0xf;
2010 gen_op_iwmmxt_movq_M0_wRn(rd0);
2011 switch ((insn >> 22) & 3) {
2012 case 0:
2013 if (insn & (1 << 21))
2014 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2015 else
2016 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2017 break;
2018 case 1:
2019 if (insn & (1 << 21))
2020 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2021 else
2022 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2023 break;
2024 case 2:
2025 if (insn & (1 << 21))
2026 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2027 else
2028 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2029 break;
2030 case 3:
2031 return 1;
2032 }
2033 gen_op_iwmmxt_movq_wRn_M0(wrd);
2034 gen_op_iwmmxt_set_mup();
2035 gen_op_iwmmxt_set_cup();
2036 break;
2037 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2038 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2039 wrd = (insn >> 12) & 0xf;
2040 rd0 = (insn >> 16) & 0xf;
2041 gen_op_iwmmxt_movq_M0_wRn(rd0);
2042 switch ((insn >> 22) & 3) {
2043 case 0:
2044 if (insn & (1 << 21))
2045 gen_op_iwmmxt_unpacklsb_M0();
2046 else
2047 gen_op_iwmmxt_unpacklub_M0();
2048 break;
2049 case 1:
2050 if (insn & (1 << 21))
2051 gen_op_iwmmxt_unpacklsw_M0();
2052 else
2053 gen_op_iwmmxt_unpackluw_M0();
2054 break;
2055 case 2:
2056 if (insn & (1 << 21))
2057 gen_op_iwmmxt_unpacklsl_M0();
2058 else
2059 gen_op_iwmmxt_unpacklul_M0();
2060 break;
2061 case 3:
2062 return 1;
2063 }
2064 gen_op_iwmmxt_movq_wRn_M0(wrd);
2065 gen_op_iwmmxt_set_mup();
2066 gen_op_iwmmxt_set_cup();
2067 break;
2068 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2069 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2070 wrd = (insn >> 12) & 0xf;
2071 rd0 = (insn >> 16) & 0xf;
2072 gen_op_iwmmxt_movq_M0_wRn(rd0);
2073 switch ((insn >> 22) & 3) {
2074 case 0:
2075 if (insn & (1 << 21))
2076 gen_op_iwmmxt_unpackhsb_M0();
2077 else
2078 gen_op_iwmmxt_unpackhub_M0();
2079 break;
2080 case 1:
2081 if (insn & (1 << 21))
2082 gen_op_iwmmxt_unpackhsw_M0();
2083 else
2084 gen_op_iwmmxt_unpackhuw_M0();
2085 break;
2086 case 2:
2087 if (insn & (1 << 21))
2088 gen_op_iwmmxt_unpackhsl_M0();
2089 else
2090 gen_op_iwmmxt_unpackhul_M0();
2091 break;
2092 case 3:
2093 return 1;
2094 }
2095 gen_op_iwmmxt_movq_wRn_M0(wrd);
2096 gen_op_iwmmxt_set_mup();
2097 gen_op_iwmmxt_set_cup();
2098 break;
2099 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2100 case 0x214: case 0x614: case 0xa14: case 0xe14:
2101 if (((insn >> 22) & 3) == 0)
2102 return 1;
2103 wrd = (insn >> 12) & 0xf;
2104 rd0 = (insn >> 16) & 0xf;
2105 gen_op_iwmmxt_movq_M0_wRn(rd0);
2106 tmp = tcg_temp_new_i32();
2107 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2108 return 1;
2109 }
2110 switch ((insn >> 22) & 3) {
2111 case 1:
2112 gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp);
2113 break;
2114 case 2:
2115 gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp);
2116 break;
2117 case 3:
2118 gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp);
2119 break;
2120 }
2121 gen_op_iwmmxt_movq_wRn_M0(wrd);
2122 gen_op_iwmmxt_set_mup();
2123 gen_op_iwmmxt_set_cup();
2124 break;
2125 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2126 case 0x014: case 0x414: case 0x814: case 0xc14:
2127 if (((insn >> 22) & 3) == 0)
2128 return 1;
2129 wrd = (insn >> 12) & 0xf;
2130 rd0 = (insn >> 16) & 0xf;
2131 gen_op_iwmmxt_movq_M0_wRn(rd0);
2132 tmp = tcg_temp_new_i32();
2133 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2134 return 1;
2135 }
2136 switch ((insn >> 22) & 3) {
2137 case 1:
2138 gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp);
2139 break;
2140 case 2:
2141 gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp);
2142 break;
2143 case 3:
2144 gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp);
2145 break;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 gen_op_iwmmxt_set_cup();
2150 break;
2151 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2152 case 0x114: case 0x514: case 0x914: case 0xd14:
2153 if (((insn >> 22) & 3) == 0)
2154 return 1;
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0);
2158 tmp = tcg_temp_new_i32();
2159 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2160 return 1;
2161 }
2162 switch ((insn >> 22) & 3) {
2163 case 1:
2164 gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp);
2165 break;
2166 case 2:
2167 gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp);
2168 break;
2169 case 3:
2170 gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp);
2171 break;
2172 }
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2176 break;
2177 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2178 case 0x314: case 0x714: case 0xb14: case 0xf14:
2179 if (((insn >> 22) & 3) == 0)
2180 return 1;
2181 wrd = (insn >> 12) & 0xf;
2182 rd0 = (insn >> 16) & 0xf;
2183 gen_op_iwmmxt_movq_M0_wRn(rd0);
2184 tmp = tcg_temp_new_i32();
2185 switch ((insn >> 22) & 3) {
2186 case 1:
2187 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2188 return 1;
2189 }
2190 gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp);
2191 break;
2192 case 2:
2193 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2194 return 1;
2195 }
2196 gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp);
2197 break;
2198 case 3:
2199 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2200 return 1;
2201 }
2202 gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp);
2203 break;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2210 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 22) & 3) {
2216 case 0:
2217 if (insn & (1 << 21))
2218 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2219 else
2220 gen_op_iwmmxt_minub_M0_wRn(rd1);
2221 break;
2222 case 1:
2223 if (insn & (1 << 21))
2224 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2225 else
2226 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2227 break;
2228 case 2:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_minul_M0_wRn(rd1);
2233 break;
2234 case 3:
2235 return 1;
2236 }
2237 gen_op_iwmmxt_movq_wRn_M0(wrd);
2238 gen_op_iwmmxt_set_mup();
2239 break;
2240 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2241 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 rd1 = (insn >> 0) & 0xf;
2245 gen_op_iwmmxt_movq_M0_wRn(rd0);
2246 switch ((insn >> 22) & 3) {
2247 case 0:
2248 if (insn & (1 << 21))
2249 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2250 else
2251 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2252 break;
2253 case 1:
2254 if (insn & (1 << 21))
2255 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2256 else
2257 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2258 break;
2259 case 2:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2264 break;
2265 case 3:
2266 return 1;
2267 }
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 break;
2271 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2272 case 0x402: case 0x502: case 0x602: case 0x702:
2273 wrd = (insn >> 12) & 0xf;
2274 rd0 = (insn >> 16) & 0xf;
2275 rd1 = (insn >> 0) & 0xf;
2276 gen_op_iwmmxt_movq_M0_wRn(rd0);
2277 iwmmxt_load_reg(cpu_V1, rd1);
2278 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1,
2279 tcg_constant_i32((insn >> 20) & 3));
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 break;
2283 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2284 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2285 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2286 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2287 wrd = (insn >> 12) & 0xf;
2288 rd0 = (insn >> 16) & 0xf;
2289 rd1 = (insn >> 0) & 0xf;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0);
2291 switch ((insn >> 20) & 0xf) {
2292 case 0x0:
2293 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2294 break;
2295 case 0x1:
2296 gen_op_iwmmxt_subub_M0_wRn(rd1);
2297 break;
2298 case 0x3:
2299 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2300 break;
2301 case 0x4:
2302 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2303 break;
2304 case 0x5:
2305 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2306 break;
2307 case 0x7:
2308 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2309 break;
2310 case 0x8:
2311 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2312 break;
2313 case 0x9:
2314 gen_op_iwmmxt_subul_M0_wRn(rd1);
2315 break;
2316 case 0xb:
2317 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2318 break;
2319 default:
2320 return 1;
2321 }
2322 gen_op_iwmmxt_movq_wRn_M0(wrd);
2323 gen_op_iwmmxt_set_mup();
2324 gen_op_iwmmxt_set_cup();
2325 break;
2326 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2327 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2328 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2329 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2330 wrd = (insn >> 12) & 0xf;
2331 rd0 = (insn >> 16) & 0xf;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
2333 tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2334 gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp);
2335 gen_op_iwmmxt_movq_wRn_M0(wrd);
2336 gen_op_iwmmxt_set_mup();
2337 gen_op_iwmmxt_set_cup();
2338 break;
2339 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2340 case 0x418: case 0x518: case 0x618: case 0x718:
2341 case 0x818: case 0x918: case 0xa18: case 0xb18:
2342 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2343 wrd = (insn >> 12) & 0xf;
2344 rd0 = (insn >> 16) & 0xf;
2345 rd1 = (insn >> 0) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
2347 switch ((insn >> 20) & 0xf) {
2348 case 0x0:
2349 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2350 break;
2351 case 0x1:
2352 gen_op_iwmmxt_addub_M0_wRn(rd1);
2353 break;
2354 case 0x3:
2355 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2356 break;
2357 case 0x4:
2358 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2359 break;
2360 case 0x5:
2361 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2362 break;
2363 case 0x7:
2364 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2365 break;
2366 case 0x8:
2367 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2368 break;
2369 case 0x9:
2370 gen_op_iwmmxt_addul_M0_wRn(rd1);
2371 break;
2372 case 0xb:
2373 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2374 break;
2375 default:
2376 return 1;
2377 }
2378 gen_op_iwmmxt_movq_wRn_M0(wrd);
2379 gen_op_iwmmxt_set_mup();
2380 gen_op_iwmmxt_set_cup();
2381 break;
2382 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2383 case 0x408: case 0x508: case 0x608: case 0x708:
2384 case 0x808: case 0x908: case 0xa08: case 0xb08:
2385 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2386 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2387 return 1;
2388 wrd = (insn >> 12) & 0xf;
2389 rd0 = (insn >> 16) & 0xf;
2390 rd1 = (insn >> 0) & 0xf;
2391 gen_op_iwmmxt_movq_M0_wRn(rd0);
2392 switch ((insn >> 22) & 3) {
2393 case 1:
2394 if (insn & (1 << 21))
2395 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2396 else
2397 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2398 break;
2399 case 2:
2400 if (insn & (1 << 21))
2401 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2402 else
2403 gen_op_iwmmxt_packul_M0_wRn(rd1);
2404 break;
2405 case 3:
2406 if (insn & (1 << 21))
2407 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2408 else
2409 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2410 break;
2411 }
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 gen_op_iwmmxt_set_cup();
2415 break;
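    /* TMIA, TMIAPH, TMIAxy: multiply two core registers (optionally
       selecting halfwords) and accumulate the result into wRd. */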
2416 case 0x201: case 0x203: case 0x205: case 0x207:
2417 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2418 case 0x211: case 0x213: case 0x215: case 0x217:
2419 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2420 wrd = (insn >> 5) & 0xf;
2421 rd0 = (insn >> 12) & 0xf;
2422 rd1 = (insn >> 0) & 0xf;
2423 if (rd0 == 0xf || rd1 == 0xf)
2424 return 1;
2425 gen_op_iwmmxt_movq_M0_wRn(wrd);
2426 tmp = load_reg(s, rd0);
2427 tmp2 = load_reg(s, rd1);
2428 switch ((insn >> 16) & 0xf) {
2429 case 0x0: /* TMIA */
2430 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2431 break;
2432 case 0x8: /* TMIAPH */
2433 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2434 break;
2435 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2436 if (insn & (1 << 16))
2437 tcg_gen_shri_i32(tmp, tmp, 16);
2438 if (insn & (1 << 17))
2439 tcg_gen_shri_i32(tmp2, tmp2, 16);
2440 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2441 break;
2442 default:
2443 return 1;
2444 }
2445 gen_op_iwmmxt_movq_wRn_M0(wrd);
2446 gen_op_iwmmxt_set_mup();
2447 break;
2448 default:
2449 return 1;
2450 }
2451
2452 return 0;
2453 }
2454
2455 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2456 (i.e. an undefined instruction). */
2457 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2458 {
2459 int acc, rd0, rd1, rdhi, rdlo;
2460 TCGv_i32 tmp, tmp2;
2461
2462 if ((insn & 0x0ff00f10) == 0x0e200010) {
2463 /* Multiply with Internal Accumulate Format */
2464 rd0 = (insn >> 12) & 0xf;
2465 rd1 = insn & 0xf;
2466 acc = (insn >> 5) & 7;
2467
2468 if (acc != 0)
2469 return 1;
2470
2471 tmp = load_reg(s, rd0);
2472 tmp2 = load_reg(s, rd1);
2473 switch ((insn >> 16) & 0xf) {
2474 case 0x0: /* MIA */
2475 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2476 break;
2477 case 0x8: /* MIAPH */
2478 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2479 break;
2480 case 0xc: /* MIABB */
2481 case 0xd: /* MIABT */
2482 case 0xe: /* MIATB */
2483 case 0xf: /* MIATT */
2484 if (insn & (1 << 16))
2485 tcg_gen_shri_i32(tmp, tmp, 16);
2486 if (insn & (1 << 17))
2487 tcg_gen_shri_i32(tmp2, tmp2, 16);
2488 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2489 break;
2490 default:
2491 return 1;
2492 }
2493
2494 gen_op_iwmmxt_movq_wRn_M0(acc);
2495 return 0;
2496 }
2497
2498 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2499 /* Internal Accumulator Access Format */
2500 rdhi = (insn >> 16) & 0xf;
2501 rdlo = (insn >> 12) & 0xf;
2502 acc = insn & 7;
2503
2504 if (acc != 0)
2505 return 1;
2506
2507 if (insn & ARM_CP_RW_BIT) { /* MRA */
2508 iwmmxt_load_reg(cpu_V0, acc);
2509 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2510 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
2511 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2512 } else { /* MAR */
2513 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2514 iwmmxt_store_reg(cpu_V0, acc);
2515 }
2516 return 0;
2517 }
2518
2519 return 1;
2520 }
2521
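/* Jump to a TB found by run-time lookup of the current CPU state
 * (no direct block chaining).
 */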
2522 static void gen_goto_ptr(void)
2523 {
2524 tcg_gen_lookup_and_goto_ptr();
2525 }
2526
2527 /* This will end the TB but doesn't guarantee we'll return to
2528 * cpu_loop_exec. Any live exit_requests will be processed as we
2529 * enter the next TB.
2530 */
2531 static void gen_goto_tb(DisasContext *s, int n, target_long diff)
2532 {
2533 if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
2534 /*
2535 * For pcrel, the pc must always be up-to-date on entry to
2536 * the linked TB, so that it can use simple additions for all
2537 * further adjustments. For !pcrel, the linked TB is compiled
2538 * to know its full virtual address, so we can delay the
2539 * update to pc to the unlinked path. A long chain of links
2540 * can thus avoid many updates to the PC.
2541 */
2542 if (tb_cflags(s->base.tb) & CF_PCREL) {
2543 gen_update_pc(s, diff);
2544 tcg_gen_goto_tb(n);
2545 } else {
2546 tcg_gen_goto_tb(n);
2547 gen_update_pc(s, diff);
2548 }
2549 tcg_gen_exit_tb(s->base.tb, n);
2550 } else {
2551 gen_update_pc(s, diff);
2552 gen_goto_ptr();
2553 }
2554 s->base.is_jmp = DISAS_NORETURN;
2555 }
2556
2557 /* Jump, specifying which TB number to use if we gen_goto_tb() */
2558 static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno)
2559 {
2560 if (unlikely(s->ss_active)) {
2561 /* An indirect jump so that we still trigger the debug exception. */
2562 gen_update_pc(s, diff);
2563 s->base.is_jmp = DISAS_JUMP;
2564 return;
2565 }
2566 switch (s->base.is_jmp) {
2567 case DISAS_NEXT:
2568 case DISAS_TOO_MANY:
2569 case DISAS_NORETURN:
2570 /*
2571 * The normal case: just go to the destination TB.
2572 * NB: NORETURN happens if we generate code like
2573 * gen_brcondi(l);
2574 * gen_jmp();
2575 * gen_set_label(l);
2576 * gen_jmp();
2577 * on the second call to gen_jmp().
2578 */
2579 gen_goto_tb(s, tbno, diff);
2580 break;
2581 case DISAS_UPDATE_NOCHAIN:
2582 case DISAS_UPDATE_EXIT:
2583 /*
2584 * We already decided we're leaving the TB for some other reason.
2585 * Avoid using goto_tb so we really do exit back to the main loop
2586 * and don't chain to another TB.
2587 */
2588 gen_update_pc(s, diff);
2589 gen_goto_ptr();
2590 s->base.is_jmp = DISAS_NORETURN;
2591 break;
2592 default:
2593 /*
2594 * We shouldn't be emitting code for a jump and also have
2595 * is_jmp set to one of the special cases like DISAS_SWI.
2596 */
2597 g_assert_not_reached();
2598 }
2599 }
2600
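/* Unconditional jump to pc_curr + diff, using TB exit slot 0. */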
2601 static inline void gen_jmp(DisasContext *s, target_long diff)
2602 {
2603 gen_jmp_tb(s, diff, 0);
2604 }
2605
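/*
 * Signed 16x16 multiply: select the top (x/y set) or bottom halfword
 * of each operand, sign-extend it, and multiply.
 */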
2606 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2607 {
2608 if (x)
2609 tcg_gen_sari_i32(t0, t0, 16);
2610 else
2611 gen_sxth(t0);
2612 if (y)
2613 tcg_gen_sari_i32(t1, t1, 16);
2614 else
2615 gen_sxth(t1);
2616 tcg_gen_mul_i32(t0, t0, t1);
2617 }
2618
2619 /* Return the mask of PSR bits set by a MSR instruction. */
2620 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2621 {
2622 uint32_t mask = 0;
2623
2624 if (flags & (1 << 0)) {
2625 mask |= 0xff;
2626 }
2627 if (flags & (1 << 1)) {
2628 mask |= 0xff00;
2629 }
2630 if (flags & (1 << 2)) {
2631 mask |= 0xff0000;
2632 }
2633 if (flags & (1 << 3)) {
2634 mask |= 0xff000000;
2635 }
2636
2637 /* Mask out undefined and reserved bits. */
2638 mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2639
2640 /* Mask out execution state. */
2641 if (!spsr) {
2642 mask &= ~CPSR_EXEC;
2643 }
2644
2645 /* Mask out privileged bits. */
2646 if (IS_USER(s)) {
2647 mask &= CPSR_USER;
2648 }
2649 return mask;
2650 }
2651
2652 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2653 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2654 {
2655 TCGv_i32 tmp;
2656 if (spsr) {
2657 /* ??? This is also undefined in system mode. */
2658 if (IS_USER(s))
2659 return 1;
2660
2661 tmp = load_cpu_field(spsr);
2662 tcg_gen_andi_i32(tmp, tmp, ~mask);
2663 tcg_gen_andi_i32(t0, t0, mask);
2664 tcg_gen_or_i32(tmp, tmp, t0);
2665 store_cpu_field(tmp, spsr);
2666 } else {
2667 gen_set_cpsr(t0, mask);
2668 }
2669 gen_lookup_tb(s);
2670 return 0;
2671 }
2672
2673 /* Returns nonzero if access to the PSR is not permitted. */
2674 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2675 {
2676 TCGv_i32 tmp;
2677 tmp = tcg_temp_new_i32();
2678 tcg_gen_movi_i32(tmp, val);
2679 return gen_set_psr(s, mask, spsr, tmp);
2680 }
2681
2682 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2683 int *tgtmode, int *regno)
2684 {
2685 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2686 * the target mode and register number, and identify the various
2687 * unpredictable cases.
2688 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2689 * + executed in user mode
2690 * + using R15 as the src/dest register
2691 * + accessing an unimplemented register
2692 * + accessing a register that's inaccessible at current PL/security state*
2693 * + accessing a register that you could access with a different insn
2694 * We choose to UNDEF in all these cases.
2695 * Since we don't know which of the various AArch32 modes we are in
2696 * we have to defer some checks to runtime.
2697 * Accesses to Monitor mode registers from Secure EL1 (which implies
2698 * that EL3 is AArch64) must trap to EL3.
2699 *
2700 * If the access checks fail this function will emit code to take
2701 * an exception and return false. Otherwise it will return true,
2702 * and set *tgtmode and *regno appropriately.
2703 */
2704 /* These instructions are present only in ARMv8, or in ARMv7 with the
2705 * Virtualization Extensions.
2706 */
2707 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2708 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2709 goto undef;
2710 }
2711
2712 if (IS_USER(s) || rn == 15) {
2713 goto undef;
2714 }
2715
2716 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2717 * of registers into (r, sysm).
2718 */
2719 if (r) {
2720 /* SPSRs for other modes */
2721 switch (sysm) {
2722 case 0xe: /* SPSR_fiq */
2723 *tgtmode = ARM_CPU_MODE_FIQ;
2724 break;
2725 case 0x10: /* SPSR_irq */
2726 *tgtmode = ARM_CPU_MODE_IRQ;
2727 break;
2728 case 0x12: /* SPSR_svc */
2729 *tgtmode = ARM_CPU_MODE_SVC;
2730 break;
2731 case 0x14: /* SPSR_abt */
2732 *tgtmode = ARM_CPU_MODE_ABT;
2733 break;
2734 case 0x16: /* SPSR_und */
2735 *tgtmode = ARM_CPU_MODE_UND;
2736 break;
2737 case 0x1c: /* SPSR_mon */
2738 *tgtmode = ARM_CPU_MODE_MON;
2739 break;
2740 case 0x1e: /* SPSR_hyp */
2741 *tgtmode = ARM_CPU_MODE_HYP;
2742 break;
2743 default: /* unallocated */
2744 goto undef;
2745 }
2746 /* We arbitrarily assign SPSR a register number of 16. */
2747 *regno = 16;
2748 } else {
2749 /* general purpose registers for other modes */
2750 switch (sysm) {
2751 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2752 *tgtmode = ARM_CPU_MODE_USR;
2753 *regno = sysm + 8;
2754 break;
2755 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2756 *tgtmode = ARM_CPU_MODE_FIQ;
2757 *regno = sysm;
2758 break;
2759 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2760 *tgtmode = ARM_CPU_MODE_IRQ;
2761 *regno = sysm & 1 ? 13 : 14;
2762 break;
2763 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2764 *tgtmode = ARM_CPU_MODE_SVC;
2765 *regno = sysm & 1 ? 13 : 14;
2766 break;
2767 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2768 *tgtmode = ARM_CPU_MODE_ABT;
2769 *regno = sysm & 1 ? 13 : 14;
2770 break;
2771 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2772 *tgtmode = ARM_CPU_MODE_UND;
2773 *regno = sysm & 1 ? 13 : 14;
2774 break;
2775 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2776 *tgtmode = ARM_CPU_MODE_MON;
2777 *regno = sysm & 1 ? 13 : 14;
2778 break;
2779 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2780 *tgtmode = ARM_CPU_MODE_HYP;
2781 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2782 *regno = sysm & 1 ? 13 : 17;
2783 break;
2784 default: /* unallocated */
2785 goto undef;
2786 }
2787 }
2788
2789 /* Catch the 'accessing inaccessible register' cases we can detect
2790 * at translate time.
2791 */
2792 switch (*tgtmode) {
2793 case ARM_CPU_MODE_MON:
2794 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2795 goto undef;
2796 }
2797 if (s->current_el == 1) {
2798 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2799 * then accesses to Mon registers trap to Secure EL2, if it exists,
2800 * otherwise EL3.
2801 */
2802 TCGv_i32 tcg_el;
2803
2804 if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
2805 dc_isar_feature(aa64_sel2, s)) {
2806 /* Target EL is EL<3 minus SCR_EL3.EEL2> */
2807 tcg_el = load_cpu_field_low32(cp15.scr_el3);
2808 tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
2809 tcg_gen_addi_i32(tcg_el, tcg_el, 3);
2810 } else {
2811 tcg_el = tcg_constant_i32(3);
2812 }
2813
2814 gen_exception_insn_el_v(s, 0, EXCP_UDEF,
2815 syn_uncategorized(), tcg_el);
2816 return false;
2817 }
2818 break;
2819 case ARM_CPU_MODE_HYP:
2820 /*
2821 * r13_hyp can only be accessed from Monitor mode, and so we
2822 * can forbid accesses from EL2 or below.
2823 * elr_hyp can be accessed also from Hyp mode, so forbid
2824 * accesses from EL0 or EL1.
2825 * SPSR_hyp is supposed to be in the same category as r13_hyp
2826 * and UNPREDICTABLE if accessed from anything except Monitor
2827 * mode. However there is some real-world code that will do
2828 * it because at least some hardware happens to permit the
2829 * access. (Notably a standard Cortex-R52 startup code fragment
2830 * does this.) So we permit SPSR_hyp from Hyp mode also, to allow
2831 * this (incorrect) guest code to run.
2832 */
2833 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2
2834 || (s->current_el < 3 && *regno != 16 && *regno != 17)) {
2835 goto undef;
2836 }
2837 break;
2838 default:
2839 break;
2840 }
2841
2842 return true;
2843
2844 undef:
2845 /* If we get here then some access check did not pass */
2846 gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
2847 return false;
2848 }
2849
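/* MSR (banked): copy core register rn into the banked register
 * described by (r, sysm).
 */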
2850 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2851 {
2852 TCGv_i32 tcg_reg;
2853 int tgtmode = 0, regno = 0;
2854
2855 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2856 return;
2857 }
2858
2859 /* Sync state because msr_banked() can raise exceptions */
2860 gen_set_condexec(s);
2861 gen_update_pc(s, 0);
2862 tcg_reg = load_reg(s, rn);
2863 gen_helper_msr_banked(tcg_env, tcg_reg,
2864 tcg_constant_i32(tgtmode),
2865 tcg_constant_i32(regno));
2866 s->base.is_jmp = DISAS_UPDATE_EXIT;
2867 }
2868
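/* MRS (banked): copy the banked register described by (r, sysm) into
 * core register rn.
 */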
2869 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2870 {
2871 TCGv_i32 tcg_reg;
2872 int tgtmode = 0, regno = 0;
2873
2874 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2875 return;
2876 }
2877
2878 /* Sync state because mrs_banked() can raise exceptions */
2879 gen_set_condexec(s);
2880 gen_update_pc(s, 0);
2881 tcg_reg = tcg_temp_new_i32();
2882 gen_helper_mrs_banked(tcg_reg, tcg_env,
2883 tcg_constant_i32(tgtmode),
2884 tcg_constant_i32(regno));
2885 store_reg(s, rn, tcg_reg);
2886 s->base.is_jmp = DISAS_UPDATE_EXIT;
2887 }
2888
2889 /* Store value to PC as for an exception return (i.e. don't
2890 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2891 * will do the masking based on the new value of the Thumb bit.
2892 */
2893 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2894 {
2895 tcg_gen_mov_i32(cpu_R[15], pc);
2896 }
2897
2898 /* Generate a v6 exception return. Marks both values as dead. */
2899 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2900 {
2901 store_pc_exc_ret(s, pc);
2902 /* The cpsr_write_eret helper will mask the low bits of PC
2903 * appropriately depending on the new Thumb bit, so it must
2904 * be called after storing the new PC.
2905 */
2906 translator_io_start(&s->base);
2907 gen_helper_cpsr_write_eret(tcg_env, cpsr);
2908 /* Must exit loop to check un-masked IRQs */
2909 s->base.is_jmp = DISAS_EXIT;
2910 }
2911
2912 /* Generate an old-style exception return. Marks pc as dead. */
2913 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2914 {
2915 gen_rfe(s, pc, load_cpu_field(spsr));
2916 }
2917
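/*
 * Return true if this cp15 (crn, crm) encoding lies in the guest
 * IMPLEMENTATION DEFINED space covered by the mask table below,
 * as used by the TIDCP trap check in do_coproc_insn().
 */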
2918 static bool aa32_cpreg_encoding_in_impdef_space(uint8_t crn, uint8_t crm)
2919 {
2920 static const uint16_t mask[3] = {
2921 0b0000000111100111, /* crn == 9, crm == {c0-c2, c5-c8} */
2922 0b0000000100010011, /* crn == 10, crm == {c0, c1, c4, c8} */
2923 0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */
2924 };
2925
2926 if (crn >= 9 && crn <= 11) {
2927 return (mask[crn - 9] >> crm) & 1;
2928 }
2929 return false;
2930 }
2931
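/*
 * Emit code for a 32-bit (MRC/MCR) or 64-bit (MRRC/MCRR) coprocessor
 * register access: build the trap syndrome, perform the translate-time
 * and run-time access checks, then read or write the register
 * identified by (cpnum, is64, opc1, crn, crm, opc2).
 */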
2932 static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
2933 int opc1, int crn, int crm, int opc2,
2934 bool isread, int rt, int rt2)
2935 {
2936 uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2);
2937 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2938 TCGv_ptr tcg_ri = NULL;
2939 bool need_exit_tb = false;
2940 uint32_t syndrome;
2941
2942 /*
2943 * Note that since we are an implementation which takes an
2944 * exception on a trapped conditional instruction only if the
2945 * instruction passes its condition code check, we can take
2946 * advantage of the clause in the ARM ARM that allows us to set
2947 * the COND field in the instruction to 0xE in all cases.
2948 * We could fish the actual condition out of the insn (ARM)
2949 * or the condexec bits (Thumb) but it isn't necessary.
2950 */
2951 switch (cpnum) {
2952 case 14:
2953 if (is64) {
2954 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2955 isread, false);
2956 } else {
2957 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2958 rt, isread, false);
2959 }
2960 break;
2961 case 15:
2962 if (is64) {
2963 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2964 isread, false);
2965 } else {
2966 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2967 rt, isread, false);
2968 }
2969 break;
2970 default:
2971 /*
2972 * ARMv8 defines that only coprocessors 14 and 15 exist,
2973 * so this can only happen if this is an ARMv7 or earlier CPU,
2974 * in which case the syndrome information won't actually be
2975 * guest visible.
2976 */
2977 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
2978 syndrome = syn_uncategorized();
2979 break;
2980 }
2981
2982 if (s->hstr_active && cpnum == 15 && s->current_el == 1) {
2983 /*
2984 * At EL1, check for a HSTR_EL2 trap, which must take precedence
2985 * over the UNDEF for "no such register" or the UNDEF for "access
2986 * permissions forbid this EL1 access". HSTR_EL2 traps from EL0
2987 * only happen if the cpreg doesn't UNDEF at EL0, so we do those in
2988 * access_check_cp_reg(), after the checks for whether the access
2989 * configurably trapped to EL1.
2990 */
2991 uint32_t maskbit = is64 ? crm : crn;
2992
2993 if (maskbit != 4 && maskbit != 14) {
2994 /* T4 and T14 are RES0 so never cause traps */
2995 TCGv_i32 t;
2996 DisasLabel over = gen_disas_label(s);
2997
2998 t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
2999 tcg_gen_andi_i32(t, t, 1u << maskbit);
3000 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
3001
3002 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
3003 /*
3004 * gen_exception_insn() will set is_jmp to DISAS_NORETURN,
3005 * but since we're conditionally branching over it, we want
3006 * to assume continue-to-next-instruction.
3007 */
3008 s->base.is_jmp = DISAS_NEXT;
3009 set_disas_label(s, over);
3010 }
3011 }
3012
3013 if (cpnum == 15 && aa32_cpreg_encoding_in_impdef_space(crn, crm)) {
3014 /*
3015 * Check for TIDCP trap, which must take precedence over the UNDEF
3016 * for "no such register" etc. It shares precedence with HSTR,
3017 * but raises the same exception, so order doesn't matter.
3018 */
3019 switch (s->current_el) {
3020 case 0:
3021 if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
3022 && dc_isar_feature(aa64_tidcp1, s)) {
3023 gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
3024 }
3025 break;
3026 case 1:
3027 gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
3028 break;
3029 }
3030 }
3031
3032 if (!ri) {
3033 /*
3034 * Unknown register; this might be a guest error or a QEMU
3035 * unimplemented feature.
3036 */
3037 if (is64) {
3038 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3039 "64 bit system register cp:%d opc1: %d crm:%d "
3040 "(%s)\n",
3041 isread ? "read" : "write", cpnum, opc1, crm,
3042 s->ns ? "non-secure" : "secure");
3043 } else {
3044 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3045 "system register cp:%d opc1:%d crn:%d crm:%d "
3046 "opc2:%d (%s)\n",
3047 isread ? "read" : "write", cpnum, opc1, crn,
3048 crm, opc2, s->ns ? "non-secure" : "secure");
3049 }
3050 unallocated_encoding(s);
3051 return;
3052 }
3053
3054 /* Check access permissions */
3055 if (!cp_access_ok(s->current_el, ri, isread)) {
3056 unallocated_encoding(s);
3057 return;
3058 }
3059
3060 if ((s->hstr_active && s->current_el == 0) || ri->accessfn ||
3061 (ri->fgt && s->fgt_active) ||
3062 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
3063 /*
3064 * Emit code to perform further access permissions checks at
3065 * runtime; this may result in an exception.
3066 * Note that on XScale all cp0..c13 registers do an access check
3067 * call in order to handle c15_cpar.
3068 */
3069 gen_set_condexec(s);
3070 gen_update_pc(s, 0);
3071 tcg_ri = tcg_temp_new_ptr();
3072 gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
3073 tcg_constant_i32(key),
3074 tcg_constant_i32(syndrome),
3075 tcg_constant_i32(isread));
3076 } else if (ri->type & ARM_CP_RAISES_EXC) {
3077 /*
3078 * The readfn or writefn might raise an exception;
3079 * synchronize the CPU state in case it does.
3080 */
3081 gen_set_condexec(s);
3082 gen_update_pc(s, 0);
3083 }
3084
3085 /* Handle special cases first */
3086 switch (ri->type & ARM_CP_SPECIAL_MASK) {
3087 case 0:
3088 break;
3089 case ARM_CP_NOP:
3090 return;
3091 case ARM_CP_WFI:
3092 if (isread) {
3093 unallocated_encoding(s);
3094 } else {
3095 gen_update_pc(s, curr_insn_len(s));
3096 s->base.is_jmp = DISAS_WFI;
3097 }
3098 return;
3099 default:
3100 g_assert_not_reached();
3101 }
3102
3103 if (ri->type & ARM_CP_IO) {
3104 /* I/O operations must end the TB here (whether read or write) */
3105 need_exit_tb = translator_io_start(&s->base);
3106 }
3107
3108 if (isread) {
3109 /* Read */
3110 if (is64) {
3111 TCGv_i64 tmp64;
3112 TCGv_i32 tmp;
3113 if (ri->type & ARM_CP_CONST) {
3114 tmp64 = tcg_constant_i64(ri->resetvalue);
3115 } else if (ri->readfn) {
3116 if (!tcg_ri) {
3117 tcg_ri = gen_lookup_cp_reg(key);
3118 }
3119 tmp64 = tcg_temp_new_i64();
3120 gen_helper_get_cp_reg64(tmp64, tcg_env, tcg_ri);
3121 } else {
3122 tmp64 = tcg_temp_new_i64();
3123 tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset);
3124 }
3125 tmp = tcg_temp_new_i32();
3126 tcg_gen_extrl_i64_i32(tmp, tmp64);
3127 store_reg(s, rt, tmp);
3128 tmp = tcg_temp_new_i32();
3129 tcg_gen_extrh_i64_i32(tmp, tmp64);
3130 store_reg(s, rt2, tmp);
3131 } else {
3132 TCGv_i32 tmp;
3133 if (ri->type & ARM_CP_CONST) {
3134 tmp = tcg_constant_i32(ri->resetvalue);
3135 } else if (ri->readfn) {
3136 if (!tcg_ri) {
3137 tcg_ri = gen_lookup_cp_reg(key);
3138 }
3139 tmp = tcg_temp_new_i32();
3140 gen_helper_get_cp_reg(tmp, tcg_env, tcg_ri);
3141 } else {
3142 tmp = load_cpu_offset(ri->fieldoffset);
3143 }
3144 if (rt == 15) {
3145 /* Destination register of r15 for 32 bit loads sets
3146 * the condition codes from the high 4 bits of the value
3147 */
3148 gen_set_nzcv(tmp);
3149 } else {
3150 store_reg(s, rt, tmp);
3151 }
3152 }
3153 } else {
3154 /* Write */
3155 if (ri->type & ARM_CP_CONST) {
3156 /* If not forbidden by access permissions, treat as WI */
3157 return;
3158 }
3159
3160 if (is64) {
3161 TCGv_i32 tmplo, tmphi;
3162 TCGv_i64 tmp64 = tcg_temp_new_i64();
3163 tmplo = load_reg(s, rt);
3164 tmphi = load_reg(s, rt2);
3165 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
3166 if (ri->writefn) {
3167 if (!tcg_ri) {
3168 tcg_ri = gen_lookup_cp_reg(key);
3169 }
3170 gen_helper_set_cp_reg64(tcg_env, tcg_ri, tmp64);
3171 } else {
3172 tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset);
3173 }
3174 } else {
3175 TCGv_i32 tmp = load_reg(s, rt);
3176 if (ri->writefn) {
3177 if (!tcg_ri) {
3178 tcg_ri = gen_lookup_cp_reg(key);
3179 }
3180 gen_helper_set_cp_reg(tcg_env, tcg_ri, tmp);
3181 } else {
3182 store_cpu_offset(tmp, ri->fieldoffset, 4);
3183 }
3184 }
3185 }
3186
3187 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
3188 /*
3189 * A write to any coprocessor register that ends a TB
3190 * must rebuild the hflags for the next TB.
3191 */
3192 gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
3193 /*
3194 * We default to ending the TB on a coprocessor register write,
3195 * but allow this to be suppressed by the register definition
3196 * (usually only necessary to work around guest bugs).
3197 */
3198 need_exit_tb = true;
3199 }
3200 if (need_exit_tb) {
3201 gen_lookup_tb(s);
3202 }
3203 }
3204
3205 /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
3206 static void disas_xscale_insn(DisasContext *s, uint32_t insn)
3207 {
3208 int cpnum = (insn >> 8) & 0xf;
3209
3210 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
3211 unallocated_encoding(s);
3212 } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
3213 if (disas_iwmmxt_insn(s, insn)) {
3214 unallocated_encoding(s);
3215 }
3216 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
3217 if (disas_dsp_insn(s, insn)) {
3218 unallocated_encoding(s);
3219 }
3220 }
3221 }
3222
3223 /* Store a 64-bit value to a register pair. Clobbers val. */
3224 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
3225 {
3226 TCGv_i32 tmp;
3227 tmp = tcg_temp_new_i32();
3228 tcg_gen_extrl_i64_i32(tmp, val);
3229 store_reg(s, rlow, tmp);
3230 tmp = tcg_temp_new_i32();
3231 tcg_gen_extrh_i64_i32(tmp, val);
3232 store_reg(s, rhigh, tmp);
3233 }
3234
3235 /* Load a 64-bit value from a register pair and add it to val. */
3236 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
3237 {
3238 TCGv_i64 tmp;
3239 TCGv_i32 tmpl;
3240 TCGv_i32 tmph;
3241
3242 /* Load 64-bit value rd:rn. */
3243 tmpl = load_reg(s, rlow);
3244 tmph = load_reg(s, rhigh);
3245 tmp = tcg_temp_new_i64();
3246 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
3247 tcg_gen_add_i64(val, val, tmp);
3248 }
3249
3250 /* Set N and Z flags from hi|lo. */
3251 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
3252 {
3253 tcg_gen_mov_i32(cpu_NF, hi);
3254 tcg_gen_or_i32(cpu_ZF, lo, hi);
3255 }
3256
3257 /* Load/Store exclusive instructions are implemented by remembering
3258 the value/address loaded, and seeing if these are the same
3259 when the store is performed. This should be sufficient to implement
3260 the architecturally mandated semantics, and avoids having to monitor
3261 regular stores. The compare vs the remembered value is done during
3262 the cmpxchg operation, but we must compare the addresses manually. */
3263 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
3264 TCGv_i32 addr, int size)
3265 {
3266 TCGv_i32 tmp = tcg_temp_new_i32();
3267 MemOp opc = size | MO_ALIGN | s->be_data;
3268
3269 s->is_ldex = true;
3270
3271 if (size == 3) {
3272 TCGv_i32 tmp2 = tcg_temp_new_i32();
3273 TCGv_i64 t64 = tcg_temp_new_i64();
3274
3275 /*
3276 * For AArch32, architecturally the 32-bit word at the lowest
3277 * address is always Rt and the one at addr+4 is Rt2, even if
3278 * the CPU is big-endian. That means we don't want to do a
3279 * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
3280 * architecturally 64-bit access, but instead do a 64-bit access
3281 * using MO_BE if appropriate and then split the two halves.
3282 */
3283 TCGv taddr = gen_aa32_addr(s, addr, opc);
3284
3285 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
3286 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3287 if (s->be_data == MO_BE) {
3288 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
3289 } else {
3290 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
3291 }
3292 store_reg(s, rt2, tmp2);
3293 } else {
3294 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
3295 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
3296 }
3297
3298 store_reg(s, rt, tmp);
3299 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
3300 }
3301
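/* CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address.
 */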
3302 static void gen_clrex(DisasContext *s)
3303 {
3304 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3305 }
3306
3307 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
3308 TCGv_i32 addr, int size)
3309 {
3310 TCGv_i32 t0, t1, t2;
3311 TCGv_i64 extaddr;
3312 TCGv taddr;
3313 TCGLabel *done_label;
3314 TCGLabel *fail_label;
3315 MemOp opc = size | MO_ALIGN | s->be_data;
3316
3317 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
3318 [addr] = {Rt};
3319 {Rd} = 0;
3320 } else {
3321 {Rd} = 1;
3322 } */
3323 fail_label = gen_new_label();
3324 done_label = gen_new_label();
3325 extaddr = tcg_temp_new_i64();
3326 tcg_gen_extu_i32_i64(extaddr, addr);
3327 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
3328
3329 taddr = gen_aa32_addr(s, addr, opc);
3330 t0 = tcg_temp_new_i32();
3331 t1 = load_reg(s, rt);
3332 if (size == 3) {
3333 TCGv_i64 o64 = tcg_temp_new_i64();
3334 TCGv_i64 n64 = tcg_temp_new_i64();
3335
3336 t2 = load_reg(s, rt2);
3337
3338 /*
3339 * For AArch32, architecturally the 32-bit word at the lowest
3340 * address is always Rt and the one at addr+4 is Rt2, even if
3341 * the CPU is big-endian. Since we're going to treat this as a
3342 * single 64-bit BE store, we need to put the two halves in the
3343 * opposite order for BE to LE, so that they end up in the right
3344 * places. We don't want gen_aa32_st_i64, because that checks
3345 * SCTLR_B as if for an architectural 64-bit access.
3346 */
3347 if (s->be_data == MO_BE) {
3348 tcg_gen_concat_i32_i64(n64, t2, t1);
3349 } else {
3350 tcg_gen_concat_i32_i64(n64, t1, t2);
3351 }
3352
3353 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
3354 get_mem_index(s), opc);
3355
3356 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
3357 tcg_gen_extrl_i64_i32(t0, o64);
3358 } else {
3359 t2 = tcg_temp_new_i32();
3360 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
3361 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
3362 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
3363 }
3364 tcg_gen_mov_i32(cpu_R[rd], t0);
3365 tcg_gen_br(done_label);
3366
3367 gen_set_label(fail_label);
3368 tcg_gen_movi_i32(cpu_R[rd], 1);
3369 gen_set_label(done_label);
3370 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3371 }
3372
3373 /* gen_srs:
3374 * @env: CPUARMState
3375 * @s: DisasContext
3376 * @mode: mode field from insn (which stack to store to)
3377 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
3378 * @writeback: true if writeback bit set
3379 *
3380 * Generate code for the SRS (Store Return State) insn.
3381 */
3382 static void gen_srs(DisasContext *s,
3383 uint32_t mode, uint32_t amode, bool writeback)
3384 {
3385 int32_t offset;
3386 TCGv_i32 addr, tmp;
3387 bool undef = false;
3388
3389 /* SRS is:
3390 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
3391 * and specified mode is monitor mode
3392 * - UNDEFINED in Hyp mode
3393 * - UNPREDICTABLE in User or System mode
3394 * - UNPREDICTABLE if the specified mode is:
3395 * -- not implemented
3396 * -- not a valid mode number
3397 * -- a mode that's at a higher exception level
3398 * -- Monitor, if we are Non-secure
3399 * For the UNPREDICTABLE cases we choose to UNDEF.
3400 */
3401 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
3402 gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3);
3403 return;
3404 }
3405
3406 if (s->current_el == 0 || s->current_el == 2) {
3407 undef = true;
3408 }
3409
3410 switch (mode) {
3411 case ARM_CPU_MODE_USR:
3412 case ARM_CPU_MODE_FIQ:
3413 case ARM_CPU_MODE_IRQ:
3414 case ARM_CPU_MODE_SVC:
3415 case ARM_CPU_MODE_ABT:
3416 case ARM_CPU_MODE_UND:
3417 case ARM_CPU_MODE_SYS:
3418 break;
3419 case ARM_CPU_MODE_HYP:
3420 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3421 undef = true;
3422 }
3423 break;
3424 case ARM_CPU_MODE_MON:
3425 /* No need to check specifically for "are we non-secure" because
3426 * we've already made EL0 UNDEF and handled the trap for S-EL1;
3427 * so if this isn't EL3 then we must be non-secure.
3428 */
3429 if (s->current_el != 3) {
3430 undef = true;
3431 }
3432 break;
3433 default:
3434 undef = true;
3435 }
3436
3437 if (undef) {
3438 unallocated_encoding(s);
3439 return;
3440 }
3441
3442 addr = tcg_temp_new_i32();
3443 /* get_r13_banked() will raise an exception if called from System mode */
3444 gen_set_condexec(s);
3445 gen_update_pc(s, 0);
3446 gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode));
3447 switch (amode) {
3448 case 0: /* DA */
3449 offset = -4;
3450 break;
3451 case 1: /* IA */
3452 offset = 0;
3453 break;
3454 case 2: /* DB */
3455 offset = -8;
3456 break;
3457 case 3: /* IB */
3458 offset = 4;
3459 break;
3460 default:
3461 g_assert_not_reached();
3462 }
3463 tcg_gen_addi_i32(addr, addr, offset);
3464 tmp = load_reg(s, 14);
3465 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3466 tmp = load_cpu_field(spsr);
3467 tcg_gen_addi_i32(addr, addr, 4);
3468 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3469 if (writeback) {
3470 switch (amode) {
3471 case 0:
3472 offset = -8;
3473 break;
3474 case 1:
3475 offset = 4;
3476 break;
3477 case 2:
3478 offset = -4;
3479 break;
3480 case 3:
3481 offset = 0;
3482 break;
3483 default:
3484 g_assert_not_reached();
3485 }
3486 tcg_gen_addi_i32(addr, addr, offset);
3487 gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr);
3488 }
3489 s->base.is_jmp = DISAS_UPDATE_EXIT;
3490 }
3491
3492 /* Skip this instruction if the ARM condition is false */
3493 static void arm_skip_unless(DisasContext *s, uint32_t cond)
3494 {
3495 arm_gen_condlabel(s);
3496 arm_gen_test_cc(cond ^ 1, s->condlabel.label);
3497 }
3498
3499
3500 /*
3501 * Constant expanders used by T16/T32 decode
3502 */
3503
3504 /* Return only the rotation part of T32ExpandImm. */
3505 static int t32_expandimm_rot(DisasContext *s, int x)
3506 {
3507 return x & 0xc00 ? extract32(x, 7, 5) : 0;
3508 }
3509
3510 /* Return the unrotated immediate from T32ExpandImm. */
3511 static int t32_expandimm_imm(DisasContext *s, int x)
3512 {
3513 int imm = extract32(x, 0, 8);
3514
3515 switch (extract32(x, 8, 4)) {
3516 case 0: /* XY */
3517 /* Nothing to do. */
3518 break;
3519 case 1: /* 00XY00XY */
3520 imm *= 0x00010001;
3521 break;
3522 case 2: /* XY00XY00 */
3523 imm *= 0x01000100;
3524 break;
3525 case 3: /* XYXYXYXY */
3526 imm *= 0x01010101;
3527 break;
3528 default:
3529 /* Rotated constant. */
3530 imm |= 0x80;
3531 break;
3532 }
3533 return imm;
3534 }
3535
3536 static int t32_branch24(DisasContext *s, int x)
3537 {
3538 /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
3539 x ^= !(x < 0) * (3 << 21);
3540 /* Append the final zero. */
3541 return x << 1;
3542 }
3543
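/* T16 data-processing instructions set the flags only when outside an IT block. */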
3544 static int t16_setflags(DisasContext *s)
3545 {
3546 return s->condexec_mask == 0;
3547 }
3548
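/* Expand the T16 register-list field: the extra bit 8 selects LR (r14)
 * for PUSH and PC (r15) for POP.
 */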
3549 static int t16_push_list(DisasContext *s, int x)
3550 {
3551 return (x & 0xff) | (x & 0x100) << (14 - 8);
3552 }
3553
3554 static int t16_pop_list(DisasContext *s, int x)
3555 {
3556 return (x & 0xff) | (x & 0x100) << (15 - 8);
3557 }
3558
3559 /*
3560 * Include the generated decoders.
3561 */
3562
3563 #include "decode-a32.c.inc"
3564 #include "decode-a32-uncond.c.inc"
3565 #include "decode-t32.c.inc"
3566 #include "decode-t16.c.inc"
3567
3568 static bool valid_cp(DisasContext *s, int cp)
3569 {
3570 /*
3571 * Return true if this coprocessor field indicates something
3572 * that's really a possible coprocessor.
3573 * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
3574 * and of those only cp14 and cp15 were used for registers.
3575 * cp10 and cp11 were used for VFP and Neon, whose decode is
3576 * dealt with elsewhere. With the advent of fp16, cp9 is also
3577 * now part of VFP.
3578 * For v8A and later, the encoding has been tightened so that
3579 * only cp14 and cp15 are valid, and other values aren't considered
3580 * to be in the coprocessor-instruction space at all. v8M still
3581 * permits coprocessors 0..7.
3582 * For XScale, we must not decode the XScale cp0, cp1 space as
3583 * a standard coprocessor insn, because we want to fall through to
3584 * the legacy disas_xscale_insn() decoder after decodetree is done.
3585 */
3586 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
3587 return false;
3588 }
3589
3590 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
3591 !arm_dc_feature(s, ARM_FEATURE_M)) {
3592 return cp >= 14;
3593 }
3594 return cp < 8 || cp >= 14;
3595 }
3596
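/*
 * decodetree trans functions for the generic coprocessor access
 * instructions (MCR/MRC and MCRR/MRRC): validate the coprocessor
 * number and hand off to do_coproc_insn().
 */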
3597 static bool trans_MCR(DisasContext *s, arg_MCR *a)
3598 {
3599 if (!valid_cp(s, a->cp)) {
3600 return false;
3601 }
3602 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3603 false, a->rt, 0);
3604 return true;
3605 }
3606
3607 static bool trans_MRC(DisasContext *s, arg_MRC *a)
3608 {
3609 if (!valid_cp(s, a->cp)) {
3610 return false;
3611 }
3612 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3613 true, a->rt, 0);
3614 return true;
3615 }
3616
3617 static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
3618 {
3619 if (!valid_cp(s, a->cp)) {
3620 return false;
3621 }
3622 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3623 false, a->rt, a->rt2);
3624 return true;
3625 }
3626
3627 static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
3628 {
3629 if (!valid_cp(s, a->cp)) {
3630 return false;
3631 }
3632 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3633 true, a->rt, a->rt2);
3634 return true;
3635 }
3636
3637 /* Helpers to swap operands for reverse-subtract. */
3638 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3639 {
3640 tcg_gen_sub_i32(dst, b, a);
3641 }
3642
3643 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3644 {
3645 gen_sub_CC(dst, b, a);
3646 }
3647
3648 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3649 {
3650 gen_sub_carry(dest, b, a);
3651 }
3652
3653 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3654 {
3655 gen_sbc_CC(dest, b, a);
3656 }
3657
3658 /*
3659 * Helpers for the data processing routines.
3660 *
3661 * After the computation, store the result back.
3662 * The store may be suppressed entirely (STREG_NONE), done normally into
3663 * a register (STREG_NORMAL), subjected to a runtime check against the
3664 * stack limits (STREG_SP_CHECK), or turned into an exception return.
3665 *
3666 * Always return true, indicating success for a trans_* function.
3667 */
3668 typedef enum {
3669 STREG_NONE,
3670 STREG_NORMAL,
3671 STREG_SP_CHECK,
3672 STREG_EXC_RET,
3673 } StoreRegKind;
3674
3675 static bool store_reg_kind(DisasContext *s, int rd,
3676 TCGv_i32 val, StoreRegKind kind)
3677 {
3678 switch (kind) {
3679 case STREG_NONE:
3680 return true;
3681 case STREG_NORMAL:
3682 /* See ALUWritePC: Interworking only from a32 mode. */
3683 if (s->thumb) {
3684 store_reg(s, rd, val);
3685 } else {
3686 store_reg_bx(s, rd, val);
3687 }
3688 return true;
3689 case STREG_SP_CHECK:
3690 store_sp_checked(s, val);
3691 return true;
3692 case STREG_EXC_RET:
3693 gen_exception_return(s, val);
3694 return true;
3695 }
3696 g_assert_not_reached();
3697 }
3698
3699 /*
3700 * Data Processing (register)
3701 *
3702 * Operate, with set flags, one register source,
3703 * one immediate shifted register source, and a destination.
3704 */
3705 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
3706 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3707 int logic_cc, StoreRegKind kind)
3708 {
3709 TCGv_i32 tmp1, tmp2;
3710
3711 tmp2 = load_reg(s, a->rm);
3712 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
3713 tmp1 = load_reg(s, a->rn);
3714
3715 gen(tmp1, tmp1, tmp2);
3716
3717 if (logic_cc) {
3718 gen_logic_CC(tmp1);
3719 }
3720 return store_reg_kind(s, a->rd, tmp1, kind);
3721 }
3722
3723 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
3724 void (*gen)(TCGv_i32, TCGv_i32),
3725 int logic_cc, StoreRegKind kind)
3726 {
3727 TCGv_i32 tmp;
3728
3729 tmp = load_reg(s, a->rm);
3730 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
3731
3732 gen(tmp, tmp);
3733 if (logic_cc) {
3734 gen_logic_CC(tmp);
3735 }
3736 return store_reg_kind(s, a->rd, tmp, kind);
3737 }
3738
3739 /*
3740 * Data-processing (register-shifted register)
3741 *
3742 * Operate, with set flags, one register source,
3743 * one register shifted register source, and a destination.
3744 */
3745 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
3746 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3747 int logic_cc, StoreRegKind kind)
3748 {
3749 TCGv_i32 tmp1, tmp2;
3750
3751 tmp1 = load_reg(s, a->rs);
3752 tmp2 = load_reg(s, a->rm);
3753 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3754 tmp1 = load_reg(s, a->rn);
3755
3756 gen(tmp1, tmp1, tmp2);
3757
3758 if (logic_cc) {
3759 gen_logic_CC(tmp1);
3760 }
3761 return store_reg_kind(s, a->rd, tmp1, kind);
3762 }
3763
3764 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
3765 void (*gen)(TCGv_i32, TCGv_i32),
3766 int logic_cc, StoreRegKind kind)
3767 {
3768 TCGv_i32 tmp1, tmp2;
3769
3770 tmp1 = load_reg(s, a->rs);
3771 tmp2 = load_reg(s, a->rm);
3772 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3773
3774 gen(tmp2, tmp2);
3775 if (logic_cc) {
3776 gen_logic_CC(tmp2);
3777 }
3778 return store_reg_kind(s, a->rd, tmp2, kind);
3779 }
3780
3781 /*
3782 * Data-processing (immediate)
3783 *
3784 * Operate, with set flags, one register source,
3785 * one rotated immediate, and a destination.
3786 *
3787 * Note that logic_cc && a->rot setting CF based on the msb of the
3788 * immediate is the reason why we must pass in the unrotated form
3789 * of the immediate.
3790 */
3791 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
3792 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3793 int logic_cc, StoreRegKind kind)
3794 {
3795 TCGv_i32 tmp1;
3796 uint32_t imm;
3797
3798 imm = ror32(a->imm, a->rot);
3799 if (logic_cc && a->rot) {
3800 tcg_gen_movi_i32(cpu_CF, imm >> 31);
3801 }
3802 tmp1 = load_reg(s, a->rn);
3803
3804 gen(tmp1, tmp1, tcg_constant_i32(imm));
3805
3806 if (logic_cc) {
3807 gen_logic_CC(tmp1);
3808 }
3809 return store_reg_kind(s, a->rd, tmp1, kind);
3810 }
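/*
 * Example (illustrative): a logical op with S set whose immediate
 * expands to 0xff000000 via a non-zero rotation sets CF from bit 31
 * of the expanded value (here 1) before gen_logic_CC() computes NZ.
 */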
3811
3812 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
3813 void (*gen)(TCGv_i32, TCGv_i32),
3814 int logic_cc, StoreRegKind kind)
3815 {
3816 TCGv_i32 tmp;
3817 uint32_t imm;
3818
3819 imm = ror32(a->imm, a->rot);
3820 if (logic_cc && a->rot) {
3821 tcg_gen_movi_i32(cpu_CF, imm >> 31);
3822 }
3823
3824 tmp = tcg_temp_new_i32();
3825 gen(tmp, tcg_constant_i32(imm));
3826
3827 if (logic_cc) {
3828 gen_logic_CC(tmp);
3829 }
3830 return store_reg_kind(s, a->rd, tmp, kind);
3831 }
3832
3833 #define DO_ANY3(NAME, OP, L, K) \
3834 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
3835 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
3836 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
3837 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
3838 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
3839 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
3840
3841 #define DO_ANY2(NAME, OP, L, K) \
3842 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
3843 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
3844 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
3845 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
3846 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
3847 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
3848
3849 #define DO_CMP2(NAME, OP, L) \
3850 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
3851 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
3852 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
3853 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
3854 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
3855 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
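/*
 * As an illustration, DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
 * below expands into trans_AND_rrri(), trans_AND_rrrr() and
 * trans_AND_rri(), i.e. the immediate-shifted-register,
 * register-shifted-register and rotated-immediate forms.  DO_ANY2 and
 * DO_CMP2 do the same for the single-source ops and for the
 * flag-setting comparisons that discard their result.
 */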
3856
3857 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
3858 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
3859 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
3860 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
3861
3862 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
3863 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
3864 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
3865 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
3866
3867 DO_CMP2(TST, tcg_gen_and_i32, true)
3868 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
3869 DO_CMP2(CMN, gen_add_CC, false)
3870 DO_CMP2(CMP, gen_sub_CC, false)
3871
3872 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
3873 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
3874
3875 /*
3876 * Note for the computation of StoreRegKind we return out of the
3877 * middle of the functions that are expanded by DO_ANY3, and that
3878 * we modify a->s via that parameter before it is used by OP.
3879 */
3880 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
3881 ({
3882 StoreRegKind ret = STREG_NORMAL;
3883 if (a->rd == 15 && a->s) {
3884 /*
3885 * See ALUExceptionReturn:
3886 * In User mode, UNPREDICTABLE; we choose UNDEF.
3887 * In Hyp mode, UNDEFINED.
3888 */
3889 if (IS_USER(s) || s->current_el == 2) {
3890 unallocated_encoding(s);
3891 return true;
3892 }
3893 /* There is no writeback of nzcv to PSTATE. */
3894 a->s = 0;
3895 ret = STREG_EXC_RET;
3896 } else if (a->rd == 13 && a->rn == 13) {
3897 ret = STREG_SP_CHECK;
3898 }
3899 ret;
3900 }))
3901
3902 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
3903 ({
3904 StoreRegKind ret = STREG_NORMAL;
3905 if (a->rd == 15 && a->s) {
3906 /*
3907 * See ALUExceptionReturn:
3908 * In User mode, UNPREDICTABLE; we choose UNDEF.
3909 * In Hyp mode, UNDEFINED.
3910 */
3911 if (IS_USER(s) || s->current_el == 2) {
3912 unallocated_encoding(s);
3913 return true;
3914 }
3915 /* There is no writeback of nzcv to PSTATE. */
3916 a->s = 0;
3917 ret = STREG_EXC_RET;
3918 } else if (a->rd == 13) {
3919 ret = STREG_SP_CHECK;
3920 }
3921 ret;
3922 }))
3923
3924 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
3925
3926 /*
3927 * ORN is only available with T32, so there is no register-shifted-register
3928 * form of the insn. Using the DO_ANY3 macro would create an unused function.
3929 */
3930 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
3931 {
3932 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3933 }
3934
3935 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
3936 {
3937 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3938 }
3939
3940 #undef DO_ANY3
3941 #undef DO_ANY2
3942 #undef DO_CMP2
3943
3944 static bool trans_ADR(DisasContext *s, arg_ri *a)
3945 {
3946 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
3947 return true;
3948 }
3949
3950 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
3951 {
3952 if (!ENABLE_ARCH_6T2) {
3953 return false;
3954 }
3955
3956 store_reg(s, a->rd, tcg_constant_i32(a->imm));
3957 return true;
3958 }
3959
3960 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
3961 {
3962 TCGv_i32 tmp;
3963
3964 if (!ENABLE_ARCH_6T2) {
3965 return false;
3966 }
3967
3968 tmp = load_reg(s, a->rd);
3969 tcg_gen_ext16u_i32(tmp, tmp);
3970 tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
3971 store_reg(s, a->rd, tmp);
3972 return true;
3973 }
3974
3975 /*
3976 * v8.1M MVE wide-shifts
3977 */
3978 static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
3979 WideShiftImmFn *fn)
3980 {
3981 TCGv_i64 rda;
3982 TCGv_i32 rdalo, rdahi;
3983
3984 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
3985 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
3986 return false;
3987 }
3988 if (a->rdahi == 15) {
3989 /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
3990 return false;
3991 }
3992 if (!dc_isar_feature(aa32_mve, s) ||
3993 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
3994 a->rdahi == 13) {
3995 /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
3996 unallocated_encoding(s);
3997 return true;
3998 }
3999
4000 if (a->shim == 0) {
4001 a->shim = 32;
4002 }
4003
4004 rda = tcg_temp_new_i64();
4005 rdalo = load_reg(s, a->rdalo);
4006 rdahi = load_reg(s, a->rdahi);
4007 tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4008
4009 fn(rda, rda, a->shim);
4010
4011 tcg_gen_extrl_i64_i32(rdalo, rda);
4012 tcg_gen_extrh_i64_i32(rdahi, rda);
4013 store_reg(s, a->rdalo, rdalo);
4014 store_reg(s, a->rdahi, rdahi);
4015
4016 return true;
4017 }
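/*
 * Note that RdaLo supplies bits [31:0] and RdaHi bits [63:32] of the
 * 64-bit operand, which is why the two registers are concatenated
 * before the shift and split back afterwards.
 */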
4018
4019 static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4020 {
4021 return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
4022 }
4023
4024 static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4025 {
4026 return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
4027 }
4028
4029 static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4030 {
4031 return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
4032 }
4033
4034 static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4035 {
4036 gen_helper_mve_sqshll(r, tcg_env, n, tcg_constant_i32(shift));
4037 }
4038
4039 static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4040 {
4041 return do_mve_shl_ri(s, a, gen_mve_sqshll);
4042 }
4043
4044 static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4045 {
4046 gen_helper_mve_uqshll(r, tcg_env, n, tcg_constant_i32(shift));
4047 }
4048
4049 static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4050 {
4051 return do_mve_shl_ri(s, a, gen_mve_uqshll);
4052 }
4053
4054 static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4055 {
4056 return do_mve_shl_ri(s, a, gen_srshr64_i64);
4057 }
4058
4059 static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4060 {
4061 return do_mve_shl_ri(s, a, gen_urshr64_i64);
4062 }
4063
4064 static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
4065 {
4066 TCGv_i64 rda;
4067 TCGv_i32 rdalo, rdahi;
4068
4069 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4070 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4071 return false;
4072 }
4073 if (a->rdahi == 15) {
4074 /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
4075 return false;
4076 }
4077 if (!dc_isar_feature(aa32_mve, s) ||
4078 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4079 a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
4080 a->rm == a->rdahi || a->rm == a->rdalo) {
4081 /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
4082 unallocated_encoding(s);
4083 return true;
4084 }
4085
4086 rda = tcg_temp_new_i64();
4087 rdalo = load_reg(s, a->rdalo);
4088 rdahi = load_reg(s, a->rdahi);
4089 tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4090
4091 /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4092 fn(rda, tcg_env, rda, cpu_R[a->rm]);
4093
4094 tcg_gen_extrl_i64_i32(rdalo, rda);
4095 tcg_gen_extrh_i64_i32(rdahi, rda);
4096 store_reg(s, a->rdalo, rdalo);
4097 store_reg(s, a->rdahi, rdahi);
4098
4099 return true;
4100 }
4101
4102 static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
4103 {
4104 return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
4105 }
4106
4107 static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
4108 {
4109 return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
4110 }
4111
4112 static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4113 {
4114 return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
4115 }
4116
4117 static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4118 {
4119 return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
4120 }
4121
4122 static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4123 {
4124 return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
4125 }
4126
4127 static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4128 {
4129 return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
4130 }
4131
4132 static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
4133 {
4134 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4135 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4136 return false;
4137 }
4138 if (!dc_isar_feature(aa32_mve, s) ||
4139 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4140 a->rda == 13 || a->rda == 15) {
4141 /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
4142 unallocated_encoding(s);
4143 return true;
4144 }
4145
4146 if (a->shim == 0) {
4147 a->shim = 32;
4148 }
4149 fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
4150
4151 return true;
4152 }
4153
4154 static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4155 {
4156 return do_mve_sh_ri(s, a, gen_urshr32_i32);
4157 }
4158
4159 static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4160 {
4161 return do_mve_sh_ri(s, a, gen_srshr32_i32);
4162 }
4163
4164 static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4165 {
4166 gen_helper_mve_sqshl(r, tcg_env, n, tcg_constant_i32(shift));
4167 }
4168
4169 static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4170 {
4171 return do_mve_sh_ri(s, a, gen_mve_sqshl);
4172 }
4173
4174 static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4175 {
4176 gen_helper_mve_uqshl(r, tcg_env, n, tcg_constant_i32(shift));
4177 }
4178
4179 static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4180 {
4181 return do_mve_sh_ri(s, a, gen_mve_uqshl);
4182 }
4183
4184 static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
4185 {
4186 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4187 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4188 return false;
4189 }
4190 if (!dc_isar_feature(aa32_mve, s) ||
4191 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4192 a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
4193 a->rm == a->rda) {
4194 /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
4195 unallocated_encoding(s);
4196 return true;
4197 }
4198
4199 /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4200 fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]);
4201 return true;
4202 }
4203
4204 static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
4205 {
4206 return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
4207 }
4208
4209 static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
4210 {
4211 return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
4212 }
4213
4214 /*
4215 * Multiply and multiply accumulate
4216 */
4217
4218 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
4219 {
4220 TCGv_i32 t1, t2;
4221
4222 t1 = load_reg(s, a->rn);
4223 t2 = load_reg(s, a->rm);
4224 tcg_gen_mul_i32(t1, t1, t2);
4225 if (add) {
4226 t2 = load_reg(s, a->ra);
4227 tcg_gen_add_i32(t1, t1, t2);
4228 }
4229 if (a->s) {
4230 gen_logic_CC(t1);
4231 }
4232 store_reg(s, a->rd, t1);
4233 return true;
4234 }
4235
4236 static bool trans_MUL(DisasContext *s, arg_MUL *a)
4237 {
4238 return op_mla(s, a, false);
4239 }
4240
4241 static bool trans_MLA(DisasContext *s, arg_MLA *a)
4242 {
4243 return op_mla(s, a, true);
4244 }
4245
4246 static bool trans_MLS(DisasContext *s, arg_MLS *a)
4247 {
4248 TCGv_i32 t1, t2;
4249
4250 if (!ENABLE_ARCH_6T2) {
4251 return false;
4252 }
4253 t1 = load_reg(s, a->rn);
4254 t2 = load_reg(s, a->rm);
4255 tcg_gen_mul_i32(t1, t1, t2);
4256 t2 = load_reg(s, a->ra);
4257 tcg_gen_sub_i32(t1, t2, t1);
4258 store_reg(s, a->rd, t1);
4259 return true;
4260 }
4261
4262 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
4263 {
4264 TCGv_i32 t0, t1, t2, t3;
4265
4266 t0 = load_reg(s, a->rm);
4267 t1 = load_reg(s, a->rn);
4268 if (uns) {
4269 tcg_gen_mulu2_i32(t0, t1, t0, t1);
4270 } else {
4271 tcg_gen_muls2_i32(t0, t1, t0, t1);
4272 }
4273 if (add) {
4274 t2 = load_reg(s, a->ra);
4275 t3 = load_reg(s, a->rd);
4276 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
4277 }
4278 if (a->s) {
4279 gen_logicq_cc(t0, t1);
4280 }
4281 store_reg(s, a->ra, t0);
4282 store_reg(s, a->rd, t1);
4283 return true;
4284 }
4285
4286 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
4287 {
4288 return op_mlal(s, a, true, false);
4289 }
4290
4291 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
4292 {
4293 return op_mlal(s, a, false, false);
4294 }
4295
4296 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
4297 {
4298 return op_mlal(s, a, true, true);
4299 }
4300
4301 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
4302 {
4303 return op_mlal(s, a, false, true);
4304 }
4305
4306 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
4307 {
4308 TCGv_i32 t0, t1, t2, zero;
4309
4310 if (s->thumb
4311 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4312 : !ENABLE_ARCH_6) {
4313 return false;
4314 }
4315
4316 t0 = load_reg(s, a->rm);
4317 t1 = load_reg(s, a->rn);
4318 tcg_gen_mulu2_i32(t0, t1, t0, t1);
4319 zero = tcg_constant_i32(0);
4320 t2 = load_reg(s, a->ra);
4321 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4322 t2 = load_reg(s, a->rd);
4323 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4324 store_reg(s, a->ra, t0);
4325 store_reg(s, a->rd, t1);
4326 return true;
4327 }
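/*
 * UMAAL computes Rn * Rm + Ra + Rd as an unsigned 64-bit value split
 * across Rd:Ra.  This cannot overflow 64 bits, since the maximum is
 * (2^32 - 1)^2 + 2 * (2^32 - 1) = 2^64 - 1, so the two 32-bit addends
 * can safely be folded in with plain add2 operations.
 */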
4328
4329 /*
4330 * Saturating addition and subtraction
4331 */
4332
4333 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
4334 {
4335 TCGv_i32 t0, t1;
4336
4337 if (s->thumb
4338 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4339 : !ENABLE_ARCH_5TE) {
4340 return false;
4341 }
4342
4343 t0 = load_reg(s, a->rm);
4344 t1 = load_reg(s, a->rn);
4345 if (doub) {
4346 gen_helper_add_saturate(t1, tcg_env, t1, t1);
4347 }
4348 if (add) {
4349 gen_helper_add_saturate(t0, tcg_env, t0, t1);
4350 } else {
4351 gen_helper_sub_saturate(t0, tcg_env, t0, t1);
4352 }
4353 store_reg(s, a->rd, t0);
4354 return true;
4355 }
4356
4357 #define DO_QADDSUB(NAME, ADD, DOUB) \
4358 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
4359 { \
4360 return op_qaddsub(s, a, ADD, DOUB); \
4361 }
4362
4363 DO_QADDSUB(QADD, true, false)
4364 DO_QADDSUB(QSUB, false, false)
4365 DO_QADDSUB(QDADD, true, true)
4366 DO_QADDSUB(QDSUB, false, true)
4367
4368 #undef DO_QADDSUB
4369
4370 /*
4371 * Halfword multiply and multiply accumulate
4372 */
4373
4374 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
4375 int add_long, bool nt, bool mt)
4376 {
4377 TCGv_i32 t0, t1, tl, th;
4378
4379 if (s->thumb
4380 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4381 : !ENABLE_ARCH_5TE) {
4382 return false;
4383 }
4384
4385 t0 = load_reg(s, a->rn);
4386 t1 = load_reg(s, a->rm);
4387 gen_mulxy(t0, t1, nt, mt);
4388
4389 switch (add_long) {
4390 case 0:
4391 store_reg(s, a->rd, t0);
4392 break;
4393 case 1:
4394 t1 = load_reg(s, a->ra);
4395 gen_helper_add_setq(t0, tcg_env, t0, t1);
4396 store_reg(s, a->rd, t0);
4397 break;
4398 case 2:
4399 tl = load_reg(s, a->ra);
4400 th = load_reg(s, a->rd);
4401 /* Sign-extend the 32-bit product to 64 bits. */
4402 t1 = tcg_temp_new_i32();
4403 tcg_gen_sari_i32(t1, t0, 31);
4404 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
4405 store_reg(s, a->ra, tl);
4406 store_reg(s, a->rd, th);
4407 break;
4408 default:
4409 g_assert_not_reached();
4410 }
4411 return true;
4412 }
4413
4414 #define DO_SMLAX(NAME, add, nt, mt) \
4415 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
4416 { \
4417 return op_smlaxxx(s, a, add, nt, mt); \
4418 }
4419
4420 DO_SMLAX(SMULBB, 0, 0, 0)
4421 DO_SMLAX(SMULBT, 0, 0, 1)
4422 DO_SMLAX(SMULTB, 0, 1, 0)
4423 DO_SMLAX(SMULTT, 0, 1, 1)
4424
4425 DO_SMLAX(SMLABB, 1, 0, 0)
4426 DO_SMLAX(SMLABT, 1, 0, 1)
4427 DO_SMLAX(SMLATB, 1, 1, 0)
4428 DO_SMLAX(SMLATT, 1, 1, 1)
4429
4430 DO_SMLAX(SMLALBB, 2, 0, 0)
4431 DO_SMLAX(SMLALBT, 2, 0, 1)
4432 DO_SMLAX(SMLALTB, 2, 1, 0)
4433 DO_SMLAX(SMLALTT, 2, 1, 1)
4434
4435 #undef DO_SMLAX
4436
4437 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
4438 {
4439 TCGv_i32 t0, t1;
4440
4441 if (!ENABLE_ARCH_5TE) {
4442 return false;
4443 }
4444
4445 t0 = load_reg(s, a->rn);
4446 t1 = load_reg(s, a->rm);
4447 /*
4448 * Since the nominal result is product<47:16>, shift the 16-bit
4449 * input up by 16 bits, so that the result is at product<63:32>.
4450 */
4451 if (mt) {
4452 tcg_gen_andi_i32(t1, t1, 0xffff0000);
4453 } else {
4454 tcg_gen_shli_i32(t1, t1, 16);
4455 }
4456 tcg_gen_muls2_i32(t0, t1, t0, t1);
4457 if (add) {
4458 t0 = load_reg(s, a->ra);
4459 gen_helper_add_setq(t1, tcg_env, t1, t0);
4460 }
4461 store_reg(s, a->rd, t1);
4462 return true;
4463 }
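/*
 * Put differently: SMULW{B,T}/SMLAW{B,T} want bits <47:16> of the
 * 48-bit product of Rn and the chosen 16-bit half of Rm.  With that
 * half pre-positioned in bits [31:16], those product bits are exactly
 * the high word returned by tcg_gen_muls2_i32() in t1.
 */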
4464
4465 #define DO_SMLAWX(NAME, add, mt) \
4466 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
4467 { \
4468 return op_smlawx(s, a, add, mt); \
4469 }
4470
4471 DO_SMLAWX(SMULWB, 0, 0)
4472 DO_SMLAWX(SMULWT, 0, 1)
4473 DO_SMLAWX(SMLAWB, 1, 0)
4474 DO_SMLAWX(SMLAWT, 1, 1)
4475
4476 #undef DO_SMLAWX
4477
4478 /*
4479 * MSR (immediate) and hints
4480 */
4481
4482 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
4483 {
4484 /*
4485 * When running single-threaded TCG code, use the helper to ensure that
4486 * the next round-robin scheduled vCPU gets a crack. When running in
4487 * MTTCG we don't generate jumps to the helper as it won't affect the
4488 * scheduling of other vCPUs.
4489 */
4490 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4491 gen_update_pc(s, curr_insn_len(s));
4492 s->base.is_jmp = DISAS_YIELD;
4493 }
4494 return true;
4495 }
4496
4497 static bool trans_WFE(DisasContext *s, arg_WFE *a)
4498 {
4499 /*
4500 * When running single-threaded TCG code, use the helper to ensure that
4501 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4502 * just skip this instruction. Currently the SEV/SEVL instructions,
4503 * which are among the many ways to wake the CPU from WFE, are not
4504 * implemented, so we can't sleep as WFI does.
4505 */
4506 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4507 gen_update_pc(s, curr_insn_len(s));
4508 s->base.is_jmp = DISAS_WFE;
4509 }
4510 return true;
4511 }
4512
4513 static bool trans_WFI(DisasContext *s, arg_WFI *a)
4514 {
4515 /* For WFI, halt the vCPU until an IRQ. */
4516 gen_update_pc(s, curr_insn_len(s));
4517 s->base.is_jmp = DISAS_WFI;
4518 return true;
4519 }
4520
4521 static bool trans_ESB(DisasContext *s, arg_ESB *a)
4522 {
4523 /*
4524 * For M-profile, minimal-RAS ESB can be a NOP.
4525 * Without RAS, we must implement this as NOP.
4526 */
4527 if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) {
4528 /*
4529 * QEMU does not have a source of physical SErrors,
4530 * so we are only concerned with virtual SErrors.
4531 * The pseudocode in the ARM for this case is
4532 * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
4533 * AArch32.vESBOperation();
4534 * Most of the condition can be evaluated at translation time.
4535 * Test for EL2 present, and defer test for SEL2 to runtime.
4536 */
4537 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
4538 gen_helper_vesb(tcg_env);
4539 }
4540 }
4541 return true;
4542 }
4543
4544 static bool trans_NOP(DisasContext *s, arg_NOP *a)
4545 {
4546 return true;
4547 }
4548
4549 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
4550 {
4551 uint32_t val = ror32(a->imm, a->rot * 2);
4552 uint32_t mask = msr_mask(s, a->mask, a->r);
4553
4554 if (gen_set_psr_im(s, mask, a->r, val)) {
4555 unallocated_encoding(s);
4556 }
4557 return true;
4558 }
4559
4560 /*
4561 * Cyclic Redundancy Check
4562 */
4563
4564 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
4565 {
4566 TCGv_i32 t1, t2, t3;
4567
4568 if (!dc_isar_feature(aa32_crc32, s)) {
4569 return false;
4570 }
4571
4572 t1 = load_reg(s, a->rn);
4573 t2 = load_reg(s, a->rm);
4574 switch (sz) {
4575 case MO_8:
4576 gen_uxtb(t2);
4577 break;
4578 case MO_16:
4579 gen_uxth(t2);
4580 break;
4581 case MO_32:
4582 break;
4583 default:
4584 g_assert_not_reached();
4585 }
4586 t3 = tcg_constant_i32(1 << sz);
4587 if (c) {
4588 gen_helper_crc32c(t1, t1, t2, t3);
4589 } else {
4590 gen_helper_crc32(t1, t1, t2, t3);
4591 }
4592 store_reg(s, a->rd, t1);
4593 return true;
4594 }
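/*
 * The third operand passed to the crc32 helpers is the number of
 * input bytes: 1 << MO_8, 1 << MO_16 and 1 << MO_32 give 1, 2 and 4.
 */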
4595
4596 #define DO_CRC32(NAME, c, sz) \
4597 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
4598 { return op_crc32(s, a, c, sz); }
4599
4600 DO_CRC32(CRC32B, false, MO_8)
4601 DO_CRC32(CRC32H, false, MO_16)
4602 DO_CRC32(CRC32W, false, MO_32)
4603 DO_CRC32(CRC32CB, true, MO_8)
4604 DO_CRC32(CRC32CH, true, MO_16)
4605 DO_CRC32(CRC32CW, true, MO_32)
4606
4607 #undef DO_CRC32
4608
4609 /*
4610 * Miscellaneous instructions
4611 */
4612
4613 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
4614 {
4615 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4616 return false;
4617 }
4618 gen_mrs_banked(s, a->r, a->sysm, a->rd);
4619 return true;
4620 }
4621
4622 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
4623 {
4624 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4625 return false;
4626 }
4627 gen_msr_banked(s, a->r, a->sysm, a->rn);
4628 return true;
4629 }
4630
4631 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
4632 {
4633 TCGv_i32 tmp;
4634
4635 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4636 return false;
4637 }
4638 if (a->r) {
4639 if (IS_USER(s)) {
4640 unallocated_encoding(s);
4641 return true;
4642 }
4643 tmp = load_cpu_field(spsr);
4644 } else {
4645 tmp = tcg_temp_new_i32();
4646 gen_helper_cpsr_read(tmp, tcg_env);
4647 }
4648 store_reg(s, a->rd, tmp);
4649 return true;
4650 }
4651
4652 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
4653 {
4654 TCGv_i32 tmp;
4655 uint32_t mask = msr_mask(s, a->mask, a->r);
4656
4657 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4658 return false;
4659 }
4660 tmp = load_reg(s, a->rn);
4661 if (gen_set_psr(s, mask, a->r, tmp)) {
4662 unallocated_encoding(s);
4663 }
4664 return true;
4665 }
4666
4667 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
4668 {
4669 TCGv_i32 tmp;
4670
4671 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4672 return false;
4673 }
4674 tmp = tcg_temp_new_i32();
4675 gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm));
4676 store_reg(s, a->rd, tmp);
4677 return true;
4678 }
4679
4680 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
4681 {
4682 TCGv_i32 addr, reg;
4683
4684 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4685 return false;
4686 }
4687 addr = tcg_constant_i32((a->mask << 10) | a->sysm);
4688 reg = load_reg(s, a->rn);
4689 gen_helper_v7m_msr(tcg_env, addr, reg);
4690 /* If we wrote to CONTROL, the EL might have changed */
4691 gen_rebuild_hflags(s, true);
4692 gen_lookup_tb(s);
4693 return true;
4694 }
4695
4696 static bool trans_BX(DisasContext *s, arg_BX *a)
4697 {
4698 if (!ENABLE_ARCH_4T) {
4699 return false;
4700 }
4701 gen_bx_excret(s, load_reg(s, a->rm));
4702 return true;
4703 }
4704
4705 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
4706 {
4707 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
4708 return false;
4709 }
4710 /*
4711 * v7A allows BXJ to be trapped via HSTR.TJDBX. We don't waste a
4712 * TBFLAGS bit on a basically-never-happens case, so call a helper
4713 * function to check for the trap and raise the exception if needed
4714 * (passing it the register number for the syndrome value).
4715 * v8A doesn't have this HSTR bit.
4716 */
4717 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4718 arm_dc_feature(s, ARM_FEATURE_EL2) &&
4719 s->current_el < 2 && s->ns) {
4720 gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm));
4721 }
4722 /* Trivial implementation equivalent to bx. */
4723 gen_bx(s, load_reg(s, a->rm));
4724 return true;
4725 }
4726
4727 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
4728 {
4729 TCGv_i32 tmp;
4730
4731 if (!ENABLE_ARCH_5) {
4732 return false;
4733 }
4734 tmp = load_reg(s, a->rm);
4735 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
4736 gen_bx(s, tmp);
4737 return true;
4738 }
4739
4740 /*
4741 * BXNS/BLXNS: only exist for v8M with the security extensions,
4742 * and always UNDEF if NonSecure. We don't implement these in
4743 * the user-only mode either (in theory you can use them from
4744 * Secure User mode but they are too tied in to system emulation).
4745 */
4746 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
4747 {
4748 if (!s->v8m_secure || IS_USER_ONLY) {
4749 unallocated_encoding(s);
4750 } else {
4751 gen_bxns(s, a->rm);
4752 }
4753 return true;
4754 }
4755
4756 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
4757 {
4758 if (!s->v8m_secure || IS_USER_ONLY) {
4759 unallocated_encoding(s);
4760 } else {
4761 gen_blxns(s, a->rm);
4762 }
4763 return true;
4764 }
4765
4766 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
4767 {
4768 TCGv_i32 tmp;
4769
4770 if (!ENABLE_ARCH_5) {
4771 return false;
4772 }
4773 tmp = load_reg(s, a->rm);
4774 tcg_gen_clzi_i32(tmp, tmp, 32);
4775 store_reg(s, a->rd, tmp);
4776 return true;
4777 }
4778
4779 static bool trans_ERET(DisasContext *s, arg_ERET *a)
4780 {
4781 TCGv_i32 tmp;
4782
4783 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
4784 return false;
4785 }
4786 if (IS_USER(s)) {
4787 unallocated_encoding(s);
4788 return true;
4789 }
4790 if (s->current_el == 2) {
4791 /* ERET from Hyp uses ELR_Hyp, not LR */
4792 tmp = load_cpu_field_low32(elr_el[2]);
4793 } else {
4794 tmp = load_reg(s, 14);
4795 }
4796 gen_exception_return(s, tmp);
4797 return true;
4798 }
4799
4800 static bool trans_HLT(DisasContext *s, arg_HLT *a)
4801 {
4802 gen_hlt(s, a->imm);
4803 return true;
4804 }
4805
4806 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
4807 {
4808 if (!ENABLE_ARCH_5) {
4809 return false;
4810 }
4811 /* BKPT is OK with ECI set and leaves it untouched */
4812 s->eci_handled = true;
4813 if (arm_dc_feature(s, ARM_FEATURE_M) &&
4814 semihosting_enabled(s->current_el == 0) &&
4815 (a->imm == 0xab)) {
4816 gen_exception_internal_insn(s, EXCP_SEMIHOST);
4817 } else {
4818 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
4819 }
4820 return true;
4821 }
4822
4823 static bool trans_HVC(DisasContext *s, arg_HVC *a)
4824 {
4825 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
4826 return false;
4827 }
4828 if (IS_USER(s)) {
4829 unallocated_encoding(s);
4830 } else {
4831 gen_hvc(s, a->imm);
4832 }
4833 return true;
4834 }
4835
4836 static bool trans_SMC(DisasContext *s, arg_SMC *a)
4837 {
4838 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
4839 return false;
4840 }
4841 if (IS_USER(s)) {
4842 unallocated_encoding(s);
4843 } else {
4844 gen_smc(s);
4845 }
4846 return true;
4847 }
4848
4849 static bool trans_SG(DisasContext *s, arg_SG *a)
4850 {
4851 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4852 !arm_dc_feature(s, ARM_FEATURE_V8)) {
4853 return false;
4854 }
4855 /*
4856 * SG (v8M only)
4857 * The bulk of the behaviour for this instruction is implemented
4858 * in v7m_handle_execute_nsc(), which deals with the insn when
4859 * it is executed by a CPU in non-secure state from memory
4860 * which is Secure & NonSecure-Callable.
4861 * Here we only need to handle the remaining cases:
4862 * * in NS memory (including the "security extension not
4863 * implemented" case) : NOP
4864 * * in S memory but CPU already secure (clear IT bits)
4865 * We know that the attribute for the memory this insn is
4866 * in must match the current CPU state, because otherwise
4867 * get_phys_addr_pmsav8 would have generated an exception.
4868 */
4869 if (s->v8m_secure) {
4870 /* Like the IT insn, we don't need to generate any code */
4871 s->condexec_cond = 0;
4872 s->condexec_mask = 0;
4873 }
4874 return true;
4875 }
4876
4877 static bool trans_TT(DisasContext *s, arg_TT *a)
4878 {
4879 TCGv_i32 addr, tmp;
4880
4881 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4882 !arm_dc_feature(s, ARM_FEATURE_V8)) {
4883 return false;
4884 }
4885 if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
4886 /* We UNDEF for these UNPREDICTABLE cases */
4887 unallocated_encoding(s);
4888 return true;
4889 }
4890 if (a->A && !s->v8m_secure) {
4891 /* This case is UNDEFINED. */
4892 unallocated_encoding(s);
4893 return true;
4894 }
4895
4896 addr = load_reg(s, a->rn);
4897 tmp = tcg_temp_new_i32();
4898 gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T));
4899 store_reg(s, a->rd, tmp);
4900 return true;
4901 }
4902
4903 /*
4904 * Load/store register index
4905 */
4906
4907 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
4908 {
4909 ISSInfo ret;
4910
4911 /* ISS not valid if writeback */
4912 if (p && !w) {
4913 ret = rd;
4914 if (curr_insn_len(s) == 2) {
4915 ret |= ISSIs16Bit;
4916 }
4917 } else {
4918 ret = ISSInvalid;
4919 }
4920 return ret;
4921 }
4922
4923 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
4924 {
4925 TCGv_i32 addr = load_reg(s, a->rn);
4926
4927 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
4928 gen_helper_v8m_stackcheck(tcg_env, addr);
4929 }
4930
4931 if (a->p) {
4932 TCGv_i32 ofs = load_reg(s, a->rm);
4933 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4934 if (a->u) {
4935 tcg_gen_add_i32(addr, addr, ofs);
4936 } else {
4937 tcg_gen_sub_i32(addr, addr, ofs);
4938 }
4939 }
4940 return addr;
4941 }
4942
4943 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
4944 TCGv_i32 addr, int address_offset)
4945 {
4946 if (!a->p) {
4947 TCGv_i32 ofs = load_reg(s, a->rm);
4948 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4949 if (a->u) {
4950 tcg_gen_add_i32(addr, addr, ofs);
4951 } else {
4952 tcg_gen_sub_i32(addr, addr, ofs);
4953 }
4954 } else if (!a->w) {
4955 return;
4956 }
4957 tcg_gen_addi_i32(addr, addr, address_offset);
4958 store_reg(s, a->rn, addr);
4959 }
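/*
 * To summarise the writeback rules implemented by op_addr_rr_pre() and
 * op_addr_rr_post(): post-indexed forms (!a->p) apply the offset after
 * the access and always write back to Rn; pre-indexed forms write back
 * only when the W bit is set; offset addressing (P set, W clear)
 * leaves Rn unchanged.
 */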
4960
4961 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
4962 MemOp mop, int mem_idx)
4963 {
4964 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
4965 TCGv_i32 addr, tmp;
4966
4967 addr = op_addr_rr_pre(s, a);
4968
4969 tmp = tcg_temp_new_i32();
4970 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
4971 disas_set_da_iss(s, mop, issinfo);
4972
4973 /*
4974 * Perform base writeback before the loaded value to
4975 * ensure correct behavior with overlapping index registers.
4976 */
4977 op_addr_rr_post(s, a, addr, 0);
4978 store_reg_from_load(s, a->rt, tmp);
4979 return true;
4980 }
4981
4982 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
4983 MemOp mop, int mem_idx)
4984 {
4985 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
4986 TCGv_i32 addr, tmp;
4987
4988 /*
4989 * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
4990 * is either UNPREDICTABLE or has defined behaviour
4991 */
4992 if (s->thumb && a->rn == 15) {
4993 return false;
4994 }
4995
4996 addr = op_addr_rr_pre(s, a);
4997
4998 tmp = load_reg(s, a->rt);
4999 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
5000 disas_set_da_iss(s, mop, issinfo);
5001
5002 op_addr_rr_post(s, a, addr, 0);
5003 return true;
5004 }
5005
5006 static void do_ldrd_load(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
5007 {
5008 /*
5009 * LDRD is required to be an atomic 64-bit access if the
5010 * address is 8-aligned, two atomic 32-bit accesses if
5011 * it's only 4-aligned, and to give an alignment fault
5012 * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN.
5013 * Rt is always the word from the lower address, and Rt2 the
5014 * data from the higher address, regardless of endianness.
5015 * So (like gen_load_exclusive) we avoid gen_aa32_ld_i64()
5016 * so we don't get its SCTLR_B check, and instead do a 64-bit access
5017 * using MO_BE if appropriate and then split the two halves.
5018 *
5019 * For M-profile, and for A-profile before LPAE, the 64-bit
5020 * atomicity is not required. We could model that using
5021 * the looser MO_ATOM_IFALIGN_PAIR, but providing a higher
5022 * level of atomicity than required is harmless (we would not
5023 * currently generate better code for IFALIGN_PAIR here).
5024 *
5025 * This also gives us the correct behaviour of not updating
5026 * rt if the load of rt2 faults; this is required for cases
5027 * like "ldrd r2, r3, [r2]" where rt is also the base register.
5028 */
5029 int mem_idx = get_mem_index(s);
5030 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
5031 TCGv taddr = gen_aa32_addr(s, addr, opc);
5032 TCGv_i64 t64 = tcg_temp_new_i64();
5033 TCGv_i32 tmp = tcg_temp_new_i32();
5034 TCGv_i32 tmp2 = tcg_temp_new_i32();
5035
5036 tcg_gen_qemu_ld_i64(t64, taddr, mem_idx, opc);
5037 if (s->be_data == MO_BE) {
5038 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
5039 } else {
5040 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
5041 }
5042 store_reg(s, rt, tmp);
5043 store_reg(s, rt2, tmp2);
5044 }
5045
5046 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
5047 {
5048 TCGv_i32 addr;
5049
5050 if (!ENABLE_ARCH_5TE) {
5051 return false;
5052 }
5053 if (a->rt & 1) {
5054 unallocated_encoding(s);
5055 return true;
5056 }
5057 addr = op_addr_rr_pre(s, a);
5058
5059 do_ldrd_load(s, addr, a->rt, a->rt + 1);
5060
5061 /* LDRD w/ base writeback is undefined if the registers overlap. */
5062 op_addr_rr_post(s, a, addr, 0);
5063 return true;
5064 }
5065
5066 static void do_strd_store(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
5067 {
5068 /*
5069 * STRD is required to be an atomic 64-bit access if the
5070 * address is 8-aligned, two atomic 32-bit accesses if
5071 * it's only 4-aligned, and to give an alignment fault
5072 * if it's not 4-aligned.
5073 * Rt is always the word from the lower address, and Rt2 the
5074 * data from the higher address, regardless of endianness.
5075 * So (like gen_store_exclusive) we avoid gen_aa32_st_i64()
5076 * so we don't get its SCTLR_B check, and instead do a 64-bit access
5077 * using MO_BE if appropriate, using a value constructed
5078 * by putting the two halves together in the right order.
5079 *
5080 * As with LDRD, the 64-bit atomicity is not required for
5081 * M-profile, or for A-profile before LPAE, and we provide
5082 * the higher guarantee always for simplicity.
5083 */
5084 int mem_idx = get_mem_index(s);
5085 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
5086 TCGv taddr = gen_aa32_addr(s, addr, opc);
5087 TCGv_i32 t1 = load_reg(s, rt);
5088 TCGv_i32 t2 = load_reg(s, rt2);
5089 TCGv_i64 t64 = tcg_temp_new_i64();
5090
5091 if (s->be_data == MO_BE) {
5092 tcg_gen_concat_i32_i64(t64, t2, t1);
5093 } else {
5094 tcg_gen_concat_i32_i64(t64, t1, t2);
5095 }
5096 tcg_gen_qemu_st_i64(t64, taddr, mem_idx, opc);
5097 }
5098
5099 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
5100 {
5101 TCGv_i32 addr;
5102
5103 if (!ENABLE_ARCH_5TE) {
5104 return false;
5105 }
5106 if (a->rt & 1) {
5107 unallocated_encoding(s);
5108 return true;
5109 }
5110 addr = op_addr_rr_pre(s, a);
5111
5112 do_strd_store(s, addr, a->rt, a->rt + 1);
5113
5114 op_addr_rr_post(s, a, addr, 0);
5115 return true;
5116 }
5117
5118 /*
5119 * Load/store immediate index
5120 */
5121
5122 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
5123 {
5124 int ofs = a->imm;
5125
5126 if (!a->u) {
5127 ofs = -ofs;
5128 }
5129
5130 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
5131 /*
5132 * Stackcheck. Here we know 'addr' is the current SP;
5133 * U is set if we're moving SP up, else down. It is
5134 * UNKNOWN whether the limit check triggers when SP starts
5135 * below the limit and ends up above it; we chose to do so.
5136 */
5137 if (!a->u) {
5138 TCGv_i32 newsp = tcg_temp_new_i32();
5139 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
5140 gen_helper_v8m_stackcheck(tcg_env, newsp);
5141 } else {
5142 gen_helper_v8m_stackcheck(tcg_env, cpu_R[13]);
5143 }
5144 }
5145
5146 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
5147 }
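/*
 * In op_addr_ri_pre() the Rn == 15 (literal) case is handled by
 * add_reg_for_lit(), which supplies the suitably aligned PC as the
 * base, so no extra special-casing is needed here.
 */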
5148
5149 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
5150 TCGv_i32 addr, int address_offset)
5151 {
5152 if (!a->p) {
5153 if (a->u) {
5154 address_offset += a->imm;
5155 } else {
5156 address_offset -= a->imm;
5157 }
5158 } else if (!a->w) {
5159 return;
5160 }
5161 tcg_gen_addi_i32(addr, addr, address_offset);
5162 store_reg(s, a->rn, addr);
5163 }
5164
5165 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
5166 MemOp mop, int mem_idx)
5167 {
5168 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
5169 TCGv_i32 addr, tmp;
5170
5171 addr = op_addr_ri_pre(s, a);
5172
5173 tmp = tcg_temp_new_i32();
5174 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
5175 disas_set_da_iss(s, mop, issinfo);
5176
5177 /*
5178 * Perform base writeback before the loaded value to
5179 * ensure correct behavior with overlapping index registers.
5180 */
5181 op_addr_ri_post(s, a, addr, 0);
5182 store_reg_from_load(s, a->rt, tmp);
5183 return true;
5184 }
5185
5186 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
5187 MemOp mop, int mem_idx)
5188 {
5189 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
5190 TCGv_i32 addr, tmp;
5191
5192 /*
5193 * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
5194 * is either UNPREDICTABLE or has defined behaviour
5195 */
5196 if (s->thumb && a->rn == 15) {
5197 return false;
5198 }
5199
5200 addr = op_addr_ri_pre(s, a);
5201
5202 tmp = load_reg(s, a->rt);
5203 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
5204 disas_set_da_iss(s, mop, issinfo);
5205
5206 op_addr_ri_post(s, a, addr, 0);
5207 return true;
5208 }
5209
5210 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5211 {
5212 TCGv_i32 addr;
5213
5214 addr = op_addr_ri_pre(s, a);
5215
5216 do_ldrd_load(s, addr, a->rt, rt2);
5217
5218 /* LDRD w/ base writeback is undefined if the registers overlap. */
5219 op_addr_ri_post(s, a, addr, 0);
5220 return true;
5221 }
5222
5223 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5224 {
5225 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5226 return false;
5227 }
5228 return op_ldrd_ri(s, a, a->rt + 1);
5229 }
5230
5231 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5232 {
5233 arg_ldst_ri b = {
5234 .u = a->u, .w = a->w, .p = a->p,
5235 .rn = a->rn, .rt = a->rt, .imm = a->imm
5236 };
5237 return op_ldrd_ri(s, &b, a->rt2);
5238 }
5239
5240 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5241 {
5242 TCGv_i32 addr;
5243
5244 addr = op_addr_ri_pre(s, a);
5245
5246 do_strd_store(s, addr, a->rt, rt2);
5247
5248 op_addr_ri_post(s, a, addr, 0);
5249 return true;
5250 }
5251
5252 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5253 {
5254 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5255 return false;
5256 }
5257 return op_strd_ri(s, a, a->rt + 1);
5258 }
5259
5260 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5261 {
5262 arg_ldst_ri b = {
5263 .u = a->u, .w = a->w, .p = a->p,
5264 .rn = a->rn, .rt = a->rt, .imm = a->imm
5265 };
5266 return op_strd_ri(s, &b, a->rt2);
5267 }
5268
5269 #define DO_LDST(NAME, WHICH, MEMOP) \
5270 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
5271 { \
5272 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
5273 } \
5274 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
5275 { \
5276 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
5277 } \
5278 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
5279 { \
5280 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
5281 } \
5282 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
5283 { \
5284 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
5285 }
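/*
 * Each DO_LDST invocation below therefore generates four trans
 * functions: the immediate- and register-offset forms plus the
 * corresponding unprivileged "T" variants (e.g. LDRT, STRT), which
 * differ only in using the user-mode memory index.
 */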
5286
5287 DO_LDST(LDR, load, MO_UL)
5288 DO_LDST(LDRB, load, MO_UB)
5289 DO_LDST(LDRH, load, MO_UW)
5290 DO_LDST(LDRSB, load, MO_SB)
5291 DO_LDST(LDRSH, load, MO_SW)
5292
5293 DO_LDST(STR, store, MO_UL)
5294 DO_LDST(STRB, store, MO_UB)
5295 DO_LDST(STRH, store, MO_UW)
5296
5297 #undef DO_LDST
5298
5299 /*
5300 * Synchronization primitives
5301 */
5302
5303 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
5304 {
5305 TCGv_i32 addr, tmp;
5306 TCGv taddr;
5307
5308 opc |= s->be_data;
5309 addr = load_reg(s, a->rn);
5310 taddr = gen_aa32_addr(s, addr, opc);
5311
5312 tmp = load_reg(s, a->rt2);
5313 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
5314
5315 store_reg(s, a->rt, tmp);
5316 return true;
5317 }
5318
5319 static bool trans_SWP(DisasContext *s, arg_SWP *a)
5320 {
5321 return op_swp(s, a, MO_UL | MO_ALIGN);
5322 }
5323
5324 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
5325 {
5326 return op_swp(s, a, MO_UB);
5327 }
5328
5329 /*
5330 * Load/Store Exclusive and Load-Acquire/Store-Release
5331 */
5332
5333 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
5334 {
5335 TCGv_i32 addr;
5336 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5337 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5338
5339 /* We UNDEF for these UNPREDICTABLE cases. */
5340 if (a->rd == 15 || a->rn == 15 || a->rt == 15
5341 || a->rd == a->rn || a->rd == a->rt
5342 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
5343 || (mop == MO_64
5344 && (a->rt2 == 15
5345 || a->rd == a->rt2
5346 || (!v8a && s->thumb && a->rt2 == 13)))) {
5347 unallocated_encoding(s);
5348 return true;
5349 }
5350
5351 if (rel) {
5352 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5353 }
5354
5355 addr = tcg_temp_new_i32();
5356 load_reg_var(s, addr, a->rn);
5357 tcg_gen_addi_i32(addr, addr, a->imm);
5358
5359 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
5360 return true;
5361 }
5362
5363 static bool trans_STREX(DisasContext *s, arg_STREX *a)
5364 {
5365 if (!ENABLE_ARCH_6) {
5366 return false;
5367 }
5368 return op_strex(s, a, MO_32, false);
5369 }
5370
5371 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
5372 {
5373 if (!ENABLE_ARCH_6K) {
5374 return false;
5375 }
5376 /* We UNDEF for these UNPREDICTABLE cases. */
5377 if (a->rt & 1) {
5378 unallocated_encoding(s);
5379 return true;
5380 }
5381 a->rt2 = a->rt + 1;
5382 return op_strex(s, a, MO_64, false);
5383 }
5384
5385 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
5386 {
5387 return op_strex(s, a, MO_64, false);
5388 }
5389
5390 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
5391 {
5392 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5393 return false;
5394 }
5395 return op_strex(s, a, MO_8, false);
5396 }
5397
5398 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
5399 {
5400 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5401 return false;
5402 }
5403 return op_strex(s, a, MO_16, false);
5404 }
5405
5406 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
5407 {
5408 if (!ENABLE_ARCH_8) {
5409 return false;
5410 }
5411 return op_strex(s, a, MO_32, true);
5412 }
5413
5414 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
5415 {
5416 if (!ENABLE_ARCH_8) {
5417 return false;
5418 }
5419 /* We UNDEF for these UNPREDICTABLE cases. */
5420 if (a->rt & 1) {
5421 unallocated_encoding(s);
5422 return true;
5423 }
5424 a->rt2 = a->rt + 1;
5425 return op_strex(s, a, MO_64, true);
5426 }
5427
5428 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
5429 {
5430 if (!ENABLE_ARCH_8) {
5431 return false;
5432 }
5433 return op_strex(s, a, MO_64, true);
5434 }
5435
5436 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
5437 {
5438 if (!ENABLE_ARCH_8) {
5439 return false;
5440 }
5441 return op_strex(s, a, MO_8, true);
5442 }
5443
5444 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
5445 {
5446 if (!ENABLE_ARCH_8) {
5447 return false;
5448 }
5449 return op_strex(s, a, MO_16, true);
5450 }
5451
5452 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
5453 {
5454 TCGv_i32 addr, tmp;
5455
5456 if (!ENABLE_ARCH_8) {
5457 return false;
5458 }
5459 /* We UNDEF for these UNPREDICTABLE cases. */
5460 if (a->rn == 15 || a->rt == 15) {
5461 unallocated_encoding(s);
5462 return true;
5463 }
5464
5465 addr = load_reg(s, a->rn);
5466 tmp = load_reg(s, a->rt);
5467 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5468 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5469 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
5470
5471 return true;
5472 }
5473
5474 static bool trans_STL(DisasContext *s, arg_STL *a)
5475 {
5476 return op_stl(s, a, MO_UL);
5477 }
5478
5479 static bool trans_STLB(DisasContext *s, arg_STL *a)
5480 {
5481 return op_stl(s, a, MO_UB);
5482 }
5483
5484 static bool trans_STLH(DisasContext *s, arg_STL *a)
5485 {
5486 return op_stl(s, a, MO_UW);
5487 }
5488
5489 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
5490 {
5491 TCGv_i32 addr;
5492 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5493 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5494
5495 /* We UNDEF for these UNPREDICTABLE cases. */
5496 if (a->rn == 15 || a->rt == 15
5497 || (!v8a && s->thumb && a->rt == 13)
5498 || (mop == MO_64
5499 && (a->rt2 == 15 || a->rt == a->rt2
5500 || (!v8a && s->thumb && a->rt2 == 13)))) {
5501 unallocated_encoding(s);
5502 return true;
5503 }
5504
5505 addr = tcg_temp_new_i32();
5506 load_reg_var(s, addr, a->rn);
5507 tcg_gen_addi_i32(addr, addr, a->imm);
5508
5509 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
5510
5511 if (acq) {
5512 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
5513 }
5514 return true;
5515 }
5516
5517 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
5518 {
5519 if (!ENABLE_ARCH_6) {
5520 return false;
5521 }
5522 return op_ldrex(s, a, MO_32, false);
5523 }
5524
5525 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
5526 {
5527 if (!ENABLE_ARCH_6K) {
5528 return false;
5529 }
5530 /* We UNDEF for these UNPREDICTABLE cases. */
5531 if (a->rt & 1) {
5532 unallocated_encoding(s);
5533 return true;
5534 }
5535 a->rt2 = a->rt + 1;
5536 return op_ldrex(s, a, MO_64, false);
5537 }
5538
5539 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
5540 {
5541 return op_ldrex(s, a, MO_64, false);
5542 }
5543
5544 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
5545 {
5546 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5547 return false;
5548 }
5549 return op_ldrex(s, a, MO_8, false);
5550 }
5551
5552 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
5553 {
5554 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5555 return false;
5556 }
5557 return op_ldrex(s, a, MO_16, false);
5558 }
5559
5560 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
5561 {
5562 if (!ENABLE_ARCH_8) {
5563 return false;
5564 }
5565 return op_ldrex(s, a, MO_32, true);
5566 }
5567
5568 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
5569 {
5570 if (!ENABLE_ARCH_8) {
5571 return false;
5572 }
5573 /* We UNDEF for these UNPREDICTABLE cases. */
5574 if (a->rt & 1) {
5575 unallocated_encoding(s);
5576 return true;
5577 }
5578 a->rt2 = a->rt + 1;
5579 return op_ldrex(s, a, MO_64, true);
5580 }
5581
5582 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
5583 {
5584 if (!ENABLE_ARCH_8) {
5585 return false;
5586 }
5587 return op_ldrex(s, a, MO_64, true);
5588 }
5589
5590 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
5591 {
5592 if (!ENABLE_ARCH_8) {
5593 return false;
5594 }
5595 return op_ldrex(s, a, MO_8, true);
5596 }
5597
5598 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
5599 {
5600 if (!ENABLE_ARCH_8) {
5601 return false;
5602 }
5603 return op_ldrex(s, a, MO_16, true);
5604 }
5605
5606 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
5607 {
5608 TCGv_i32 addr, tmp;
5609
5610 if (!ENABLE_ARCH_8) {
5611 return false;
5612 }
5613 /* We UNDEF for these UNPREDICTABLE cases. */
5614 if (a->rn == 15 || a->rt == 15) {
5615 unallocated_encoding(s);
5616 return true;
5617 }
5618
5619 addr = load_reg(s, a->rn);
5620 tmp = tcg_temp_new_i32();
5621 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5622 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
5623
5624 store_reg(s, a->rt, tmp);
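    /*
     * Load-Acquire: the barrier must come after the load so that later
     * loads and stores cannot be reordered ahead of the acquiring load.
     */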
5625 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
5626 return true;
5627 }
5628
5629 static bool trans_LDA(DisasContext *s, arg_LDA *a)
5630 {
5631 return op_lda(s, a, MO_UL);
5632 }
5633
5634 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
5635 {
5636 return op_lda(s, a, MO_UB);
5637 }
5638
5639 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
5640 {
5641 return op_lda(s, a, MO_UW);
5642 }
5643
5644 /*
5645 * Media instructions
5646 */
5647
5648 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
5649 {
5650 TCGv_i32 t1, t2;
5651
5652 if (!ENABLE_ARCH_6) {
5653 return false;
5654 }
5655
5656 t1 = load_reg(s, a->rn);
5657 t2 = load_reg(s, a->rm);
5658 gen_helper_usad8(t1, t1, t2);
5659 if (a->ra != 15) {
5660 t2 = load_reg(s, a->ra);
5661 tcg_gen_add_i32(t1, t1, t2);
5662 }
5663 store_reg(s, a->rd, t1);
5664 return true;
5665 }
5666
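/*
 * SBFX/UBFX: extract a <widthm1>+1 bit field starting at bit <lsb> from
 * Rn into Rd, sign- or zero-extended. For example, UBFX Rd, Rn, #4, #8
 * yields (Rn >> 4) & 0xff.
 */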
5667 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
5668 {
5669 TCGv_i32 tmp;
5670 int width = a->widthm1 + 1;
5671 int shift = a->lsb;
5672
5673 if (!ENABLE_ARCH_6T2) {
5674 return false;
5675 }
5676 if (shift + width > 32) {
5677 /* UNPREDICTABLE; we choose to UNDEF */
5678 unallocated_encoding(s);
5679 return true;
5680 }
5681
5682 tmp = load_reg(s, a->rn);
5683 if (u) {
5684 tcg_gen_extract_i32(tmp, tmp, shift, width);
5685 } else {
5686 tcg_gen_sextract_i32(tmp, tmp, shift, width);
5687 }
5688 store_reg(s, a->rd, tmp);
5689 return true;
5690 }
5691
5692 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
5693 {
5694 return op_bfx(s, a, false);
5695 }
5696
5697 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
5698 {
5699 return op_bfx(s, a, true);
5700 }
5701
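/*
 * BFC/BFI: insert msb+1-lsb bits into Rd starting at bit <lsb>. BFI takes
 * the low bits of Rn as the source; BFC is the Rn == 15 encoding and
 * inserts zeroes.
 */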
5702 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
5703 {
5704 int msb = a->msb, lsb = a->lsb;
5705 TCGv_i32 t_in, t_rd;
5706 int width;
5707
5708 if (!ENABLE_ARCH_6T2) {
5709 return false;
5710 }
5711 if (msb < lsb) {
5712 /* UNPREDICTABLE; we choose to UNDEF */
5713 unallocated_encoding(s);
5714 return true;
5715 }
5716
5717 width = msb + 1 - lsb;
5718 if (a->rn == 15) {
5719 /* BFC */
5720 t_in = tcg_constant_i32(0);
5721 } else {
5722 /* BFI */
5723 t_in = load_reg(s, a->rn);
5724 }
5725 t_rd = load_reg(s, a->rd);
5726 tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width);
5727 store_reg(s, a->rd, t_rd);
5728 return true;
5729 }
5730
5731 static bool trans_UDF(DisasContext *s, arg_UDF *a)
5732 {
5733 unallocated_encoding(s);
5734 return true;
5735 }
5736
5737 /*
5738 * Parallel addition and subtraction
5739 */
5740
5741 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
5742 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
5743 {
5744 TCGv_i32 t0, t1;
5745
5746 if (s->thumb
5747 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5748 : !ENABLE_ARCH_6) {
5749 return false;
5750 }
5751
5752 t0 = load_reg(s, a->rn);
5753 t1 = load_reg(s, a->rm);
5754
5755 gen(t0, t0, t1);
5756
5757 store_reg(s, a->rd, t0);
5758 return true;
5759 }
5760
5761 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
5762 void (*gen)(TCGv_i32, TCGv_i32,
5763 TCGv_i32, TCGv_ptr))
5764 {
5765 TCGv_i32 t0, t1;
5766 TCGv_ptr ge;
5767
5768 if (s->thumb
5769 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5770 : !ENABLE_ARCH_6) {
5771 return false;
5772 }
5773
5774 t0 = load_reg(s, a->rn);
5775 t1 = load_reg(s, a->rm);
5776
5777 ge = tcg_temp_new_ptr();
5778 tcg_gen_addi_ptr(ge, tcg_env, offsetof(CPUARMState, GE));
5779 gen(t0, t0, t1, ge);
5780
5781 store_reg(s, a->rd, t0);
5782 return true;
5783 }
5784
5785 #define DO_PAR_ADDSUB(NAME, helper) \
5786 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5787 { \
5788 return op_par_addsub(s, a, helper); \
5789 }
5790
5791 #define DO_PAR_ADDSUB_GE(NAME, helper) \
5792 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5793 { \
5794 return op_par_addsub_ge(s, a, helper); \
5795 }
5796
5797 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
5798 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
5799 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
5800 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
5801 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
5802 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
5803
5804 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
5805 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
5806 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
5807 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
5808 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
5809 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
5810
5811 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
5812 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
5813 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
5814 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
5815 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
5816 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
5817
5818 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
5819 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
5820 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
5821 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
5822 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
5823 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
5824
5825 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
5826 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
5827 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
5828 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
5829 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
5830 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
5831
5832 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
5833 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
5834 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
5835 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
5836 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
5837 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
5838
5839 #undef DO_PAR_ADDSUB
5840 #undef DO_PAR_ADDSUB_GE
5841
5842 /*
5843 * Packing, unpacking, saturation, and reversal
5844 */
5845
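/*
 * PKHBT: Rd[15:0] from Rn, Rd[31:16] from Rm << imm.
 * PKHTB: Rd[31:16] from Rn, Rd[15:0] from Rm shifted right arithmetically
 * (an encoded shift of 0 means ASR #32, handled as 31 below).
 */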
5846 static bool trans_PKH(DisasContext *s, arg_PKH *a)
5847 {
5848 TCGv_i32 tn, tm;
5849 int shift = a->imm;
5850
5851 if (s->thumb
5852 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5853 : !ENABLE_ARCH_6) {
5854 return false;
5855 }
5856
5857 tn = load_reg(s, a->rn);
5858 tm = load_reg(s, a->rm);
5859 if (a->tb) {
5860 /* PKHTB */
5861 if (shift == 0) {
5862 shift = 31;
5863 }
5864 tcg_gen_sari_i32(tm, tm, shift);
5865 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
5866 } else {
5867 /* PKHBT */
5868 tcg_gen_shli_i32(tm, tm, shift);
5869 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
5870 }
5871 store_reg(s, a->rd, tn);
5872 return true;
5873 }
5874
5875 static bool op_sat(DisasContext *s, arg_sat *a,
5876 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5877 {
5878 TCGv_i32 tmp;
5879 int shift = a->imm;
5880
5881 if (!ENABLE_ARCH_6) {
5882 return false;
5883 }
5884
5885 tmp = load_reg(s, a->rn);
5886 if (a->sh) {
5887 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
5888 } else {
5889 tcg_gen_shli_i32(tmp, tmp, shift);
5890 }
5891
5892 gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm));
5893
5894 store_reg(s, a->rd, tmp);
5895 return true;
5896 }
5897
5898 static bool trans_SSAT(DisasContext *s, arg_sat *a)
5899 {
5900 return op_sat(s, a, gen_helper_ssat);
5901 }
5902
5903 static bool trans_USAT(DisasContext *s, arg_sat *a)
5904 {
5905 return op_sat(s, a, gen_helper_usat);
5906 }
5907
5908 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
5909 {
5910 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5911 return false;
5912 }
5913 return op_sat(s, a, gen_helper_ssat16);
5914 }
5915
5916 static bool trans_USAT16(DisasContext *s, arg_sat *a)
5917 {
5918 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5919 return false;
5920 }
5921 return op_sat(s, a, gen_helper_usat16);
5922 }
5923
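/*
 * Extend-and-add (SXTAB, UXTAH, etc): rotate Rm right by 0, 8, 16 or 24
 * bits, extend a byte, halfword or byte-pair, and optionally add Rn
 * (Rn == 15 encodes the plain extend forms SXTB, UXTH, ...).
 */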
5924 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
5925 void (*gen_extract)(TCGv_i32, TCGv_i32),
5926 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
5927 {
5928 TCGv_i32 tmp;
5929
5930 if (!ENABLE_ARCH_6) {
5931 return false;
5932 }
5933
5934 tmp = load_reg(s, a->rm);
5935 /*
5936 * TODO: In many cases we could do a shift instead of a rotate.
5937 * Combined with a simple extend, that becomes an extract.
5938 */
5939 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
5940 gen_extract(tmp, tmp);
5941
5942 if (a->rn != 15) {
5943 TCGv_i32 tmp2 = load_reg(s, a->rn);
5944 gen_add(tmp, tmp, tmp2);
5945 }
5946 store_reg(s, a->rd, tmp);
5947 return true;
5948 }
5949
5950 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
5951 {
5952 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
5953 }
5954
5955 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
5956 {
5957 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
5958 }
5959
5960 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
5961 {
5962 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5963 return false;
5964 }
5965 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
5966 }
5967
5968 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
5969 {
5970 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
5971 }
5972
5973 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
5974 {
5975 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
5976 }
5977
5978 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
5979 {
5980 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5981 return false;
5982 }
5983 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
5984 }
5985
5986 static bool trans_SEL(DisasContext *s, arg_rrr *a)
5987 {
5988 TCGv_i32 t1, t2, t3;
5989
5990 if (s->thumb
5991 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5992 : !ENABLE_ARCH_6) {
5993 return false;
5994 }
5995
5996 t1 = load_reg(s, a->rn);
5997 t2 = load_reg(s, a->rm);
5998 t3 = tcg_temp_new_i32();
5999 tcg_gen_ld_i32(t3, tcg_env, offsetof(CPUARMState, GE));
6000 gen_helper_sel_flags(t1, t3, t1, t2);
6001 store_reg(s, a->rd, t1);
6002 return true;
6003 }
6004
6005 static bool op_rr(DisasContext *s, arg_rr *a,
6006 void (*gen)(TCGv_i32, TCGv_i32))
6007 {
6008 TCGv_i32 tmp;
6009
6010 tmp = load_reg(s, a->rm);
6011 gen(tmp, tmp);
6012 store_reg(s, a->rd, tmp);
6013 return true;
6014 }
6015
6016 static bool trans_REV(DisasContext *s, arg_rr *a)
6017 {
6018 if (!ENABLE_ARCH_6) {
6019 return false;
6020 }
6021 return op_rr(s, a, tcg_gen_bswap32_i32);
6022 }
6023
6024 static bool trans_REV16(DisasContext *s, arg_rr *a)
6025 {
6026 if (!ENABLE_ARCH_6) {
6027 return false;
6028 }
6029 return op_rr(s, a, gen_rev16);
6030 }
6031
6032 static bool trans_REVSH(DisasContext *s, arg_rr *a)
6033 {
6034 if (!ENABLE_ARCH_6) {
6035 return false;
6036 }
6037 return op_rr(s, a, gen_revsh);
6038 }
6039
6040 static bool trans_RBIT(DisasContext *s, arg_rr *a)
6041 {
6042 if (!ENABLE_ARCH_6T2) {
6043 return false;
6044 }
6045 return op_rr(s, a, gen_helper_rbit);
6046 }
6047
6048 /*
6049 * Signed multiply, signed and unsigned divide
6050 */
6051
6052 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6053 {
6054 TCGv_i32 t1, t2;
6055
6056 if (!ENABLE_ARCH_6) {
6057 return false;
6058 }
6059
6060 t1 = load_reg(s, a->rn);
6061 t2 = load_reg(s, a->rm);
6062 if (m_swap) {
6063 gen_swap_half(t2, t2);
6064 }
6065 gen_smul_dual(t1, t2);
6066
6067 if (sub) {
6068 /*
6069 * This subtraction cannot overflow, so we can do a simple
6070 * 32-bit subtraction and then a possible 32-bit saturating
6071 * addition of Ra.
6072 */
6073 tcg_gen_sub_i32(t1, t1, t2);
6074
6075 if (a->ra != 15) {
6076 t2 = load_reg(s, a->ra);
6077 gen_helper_add_setq(t1, tcg_env, t1, t2);
6078 }
6079 } else if (a->ra == 15) {
6080 /* Single saturation-checking addition */
6081 gen_helper_add_setq(t1, tcg_env, t1, t2);
6082 } else {
6083 /*
6084 * We need to add the products and Ra together and then
6085 * determine whether the final result overflowed. Doing
6086 * this as two separate add-and-check-overflow steps incorrectly
6087 * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
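         * (Each of those products is 0x40000000, so adding just the two
         * of them overflows a 32-bit signed add, yet the full sum
         * 0x7fffffff fits exactly and Q must stay clear.)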
6088 * Do all the arithmetic at 64-bits and then check for overflow.
6089 */
6090 TCGv_i64 p64, q64;
6091 TCGv_i32 t3, qf, one;
6092
6093 p64 = tcg_temp_new_i64();
6094 q64 = tcg_temp_new_i64();
6095 tcg_gen_ext_i32_i64(p64, t1);
6096 tcg_gen_ext_i32_i64(q64, t2);
6097 tcg_gen_add_i64(p64, p64, q64);
6098 load_reg_var(s, t2, a->ra);
6099 tcg_gen_ext_i32_i64(q64, t2);
6100 tcg_gen_add_i64(p64, p64, q64);
6101
6102 tcg_gen_extr_i64_i32(t1, t2, p64);
6103 /*
6104 * t1 is the low half of the result which goes into Rd.
6105 * We have overflow and must set Q if the high half (t2)
6106 * is different from the sign-extension of t1.
6107 */
6108 t3 = tcg_temp_new_i32();
6109 tcg_gen_sari_i32(t3, t1, 31);
6110 qf = load_cpu_field(QF);
6111 one = tcg_constant_i32(1);
6112 tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
6113 store_cpu_field(qf, QF);
6114 }
6115 store_reg(s, a->rd, t1);
6116 return true;
6117 }
6118
6119 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
6120 {
6121 return op_smlad(s, a, false, false);
6122 }
6123
6124 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
6125 {
6126 return op_smlad(s, a, true, false);
6127 }
6128
6129 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
6130 {
6131 return op_smlad(s, a, false, true);
6132 }
6133
6134 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
6135 {
6136 return op_smlad(s, a, true, true);
6137 }
6138
6139 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6140 {
6141 TCGv_i32 t1, t2;
6142 TCGv_i64 l1, l2;
6143
6144 if (!ENABLE_ARCH_6) {
6145 return false;
6146 }
6147
6148 t1 = load_reg(s, a->rn);
6149 t2 = load_reg(s, a->rm);
6150 if (m_swap) {
6151 gen_swap_half(t2, t2);
6152 }
6153 gen_smul_dual(t1, t2);
6154
6155 l1 = tcg_temp_new_i64();
6156 l2 = tcg_temp_new_i64();
6157 tcg_gen_ext_i32_i64(l1, t1);
6158 tcg_gen_ext_i32_i64(l2, t2);
6159
6160 if (sub) {
6161 tcg_gen_sub_i64(l1, l1, l2);
6162 } else {
6163 tcg_gen_add_i64(l1, l1, l2);
6164 }
6165
6166 gen_addq(s, l1, a->ra, a->rd);
6167 gen_storeq_reg(s, a->ra, a->rd, l1);
6168 return true;
6169 }
6170
6171 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
6172 {
6173 return op_smlald(s, a, false, false);
6174 }
6175
6176 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
6177 {
6178 return op_smlald(s, a, true, false);
6179 }
6180
6181 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
6182 {
6183 return op_smlald(s, a, false, true);
6184 }
6185
6186 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
6187 {
6188 return op_smlald(s, a, true, true);
6189 }
6190
6191 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
6192 {
6193 TCGv_i32 t1, t2;
6194
6195 if (s->thumb
6196 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
6197 : !ENABLE_ARCH_6) {
6198 return false;
6199 }
6200
6201 t1 = load_reg(s, a->rn);
6202 t2 = load_reg(s, a->rm);
6203 tcg_gen_muls2_i32(t2, t1, t1, t2);
6204
6205 if (a->ra != 15) {
6206 TCGv_i32 t3 = load_reg(s, a->ra);
6207 if (sub) {
6208 /*
6209 * For SMMLS, we need a 64-bit subtract: the borrow into the high
6210 * half comes from a non-zero low half of the product, and we also
6211 * need the correct result low half for the rounding (SMMLSR) case.
6212 */
6213 tcg_gen_sub2_i32(t2, t1, tcg_constant_i32(0), t3, t2, t1);
6214 } else {
6215 tcg_gen_add_i32(t1, t1, t3);
6216 }
6217 }
6218 if (round) {
6219 /*
6220 * Adding 0x80000000 to the 64-bit quantity means that we have
6221 * carry in to the high word when the low word has the msb set.
6222 */
6223 tcg_gen_shri_i32(t2, t2, 31);
6224 tcg_gen_add_i32(t1, t1, t2);
6225 }
6226 store_reg(s, a->rd, t1);
6227 return true;
6228 }
6229
6230 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
6231 {
6232 return op_smmla(s, a, false, false);
6233 }
6234
6235 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
6236 {
6237 return op_smmla(s, a, true, false);
6238 }
6239
6240 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
6241 {
6242 return op_smmla(s, a, false, true);
6243 }
6244
6245 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
6246 {
6247 return op_smmla(s, a, true, true);
6248 }
6249
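/*
 * Note that the udiv/sdiv helpers handle the architected corner cases:
 * division by zero yields 0 (or traps on M-profile when CCR.DIV_0_TRP
 * is set), and SDIV of INT_MIN by -1 yields INT_MIN.
 */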
6250 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
6251 {
6252 TCGv_i32 t1, t2;
6253
6254 if (s->thumb
6255 ? !dc_isar_feature(aa32_thumb_div, s)
6256 : !dc_isar_feature(aa32_arm_div, s)) {
6257 return false;
6258 }
6259
6260 t1 = load_reg(s, a->rn);
6261 t2 = load_reg(s, a->rm);
6262 if (u) {
6263 gen_helper_udiv(t1, tcg_env, t1, t2);
6264 } else {
6265 gen_helper_sdiv(t1, tcg_env, t1, t2);
6266 }
6267 store_reg(s, a->rd, t1);
6268 return true;
6269 }
6270
6271 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
6272 {
6273 return op_div(s, a, false);
6274 }
6275
6276 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
6277 {
6278 return op_div(s, a, true);
6279 }
6280
6281 /*
6282 * Block data transfer
6283 */
6284
6285 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
6286 {
6287 TCGv_i32 addr = load_reg(s, a->rn);
6288
6289 if (a->b) {
6290 if (a->i) {
6291 /* pre increment */
6292 tcg_gen_addi_i32(addr, addr, 4);
6293 } else {
6294 /* pre decrement */
6295 tcg_gen_addi_i32(addr, addr, -(n * 4));
6296 }
6297 } else if (!a->i && n != 1) {
6298 /* post decrement */
6299 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6300 }
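    /*
     * addr is now the lowest address that will be transferred:
     * e.g. Rn - 16 for a four-register decrement-before access.
     */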
6301
6302 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
6303 /*
6304 * If the writeback is incrementing SP rather than
6305 * decrementing it, and the initial SP is below the
6306 * stack limit but the final written-back SP would
6307 * be above, then we must not perform any memory
6308 * accesses, but it is IMPDEF whether we generate
6309 * an exception. We choose to do so in this case.
6310 * At this point 'addr' is the lowest address, so
6311 * either the original SP (if incrementing) or our
6312 * final SP (if decrementing), so that's what we check.
6313 */
6314 gen_helper_v8m_stackcheck(tcg_env, addr);
6315 }
6316
6317 return addr;
6318 }
6319
6320 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
6321 TCGv_i32 addr, int n)
6322 {
6323 if (a->w) {
6324 /* write back */
6325 if (!a->b) {
6326 if (a->i) {
6327 /* post increment */
6328 tcg_gen_addi_i32(addr, addr, 4);
6329 } else {
6330 /* post decrement */
6331 tcg_gen_addi_i32(addr, addr, -(n * 4));
6332 }
6333 } else if (!a->i && n != 1) {
6334 /* pre decrement */
6335 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6336 }
6337 store_reg(s, a->rn, addr);
6338 }
6339 }
6340
6341 static bool op_stm(DisasContext *s, arg_ldst_block *a)
6342 {
6343 int i, j, n, list, mem_idx;
6344 bool user = a->u;
6345 TCGv_i32 addr, tmp;
6346
6347 if (user) {
6348 /* STM (user) */
6349 if (IS_USER(s)) {
6350 /* Only usable in supervisor mode. */
6351 unallocated_encoding(s);
6352 return true;
6353 }
6354 }
6355
6356 list = a->list;
6357 n = ctpop16(list);
6358 /*
6359 * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6360 * to UNDEF. In the T32 STM encoding n == 1 is also UNPREDICTABLE,
6361 * but hardware treats it like the A32 version and implements the
6362 * single-register-store, and some in-the-wild (buggy) software
6363 * assumes that, so we don't UNDEF on that case.
6364 */
6365 if (n < 1 || a->rn == 15) {
6366 unallocated_encoding(s);
6367 return true;
6368 }
6369
6370 s->eci_handled = true;
6371
6372 addr = op_addr_block_pre(s, a, n);
6373 mem_idx = get_mem_index(s);
6374
6375 for (i = j = 0; i < 16; i++) {
6376 if (!(list & (1 << i))) {
6377 continue;
6378 }
6379
6380 if (user && i != 15) {
6381 tmp = tcg_temp_new_i32();
6382 gen_helper_get_user_reg(tmp, tcg_env, tcg_constant_i32(i));
6383 } else {
6384 tmp = load_reg(s, i);
6385 }
6386 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6387
6388 /* No need to add after the last transfer. */
6389 if (++j != n) {
6390 tcg_gen_addi_i32(addr, addr, 4);
6391 }
6392 }
6393
6394 op_addr_block_post(s, a, addr, n);
6395 clear_eci_state(s);
6396 return true;
6397 }
6398
6399 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
6400 {
6401 return op_stm(s, a);
6402 }
6403
6404 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
6405 {
6406 /* Writeback register in register list is UNPREDICTABLE for T32. */
6407 if (a->w && (a->list & (1 << a->rn))) {
6408 unallocated_encoding(s);
6409 return true;
6410 }
6411 return op_stm(s, a);
6412 }
6413
6414 static bool do_ldm(DisasContext *s, arg_ldst_block *a)
6415 {
6416 int i, j, n, list, mem_idx;
6417 bool loaded_base;
6418 bool user = a->u;
6419 bool exc_return = false;
6420 TCGv_i32 addr, tmp, loaded_var;
6421
6422 if (user) {
6423 /* LDM (user), LDM (exception return) */
6424 if (IS_USER(s)) {
6425 /* Only usable in supervisor mode. */
6426 unallocated_encoding(s);
6427 return true;
6428 }
6429 if (extract32(a->list, 15, 1)) {
6430 exc_return = true;
6431 user = false;
6432 } else {
6433 /* LDM (user) does not allow writeback. */
6434 if (a->w) {
6435 unallocated_encoding(s);
6436 return true;
6437 }
6438 }
6439 }
6440
6441 list = a->list;
6442 n = ctpop16(list);
6443 /*
6444 * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6445 * to UNDEF. In the T32 LDM encoding n == 1 is also UNPREDICTABLE,
6446 * but hardware treats it like the A32 version and implements the
6447 * single-register-load, and some in-the-wild (buggy) software
6448 * assumes that, so we don't UNDEF on that case.
6449 */
6450 if (n < 1 || a->rn == 15) {
6451 unallocated_encoding(s);
6452 return true;
6453 }
6454
6455 s->eci_handled = true;
6456
6457 addr = op_addr_block_pre(s, a, n);
6458 mem_idx = get_mem_index(s);
6459 loaded_base = false;
6460 loaded_var = NULL;
6461
6462 for (i = j = 0; i < 16; i++) {
6463 if (!(list & (1 << i))) {
6464 continue;
6465 }
6466
6467 tmp = tcg_temp_new_i32();
6468 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6469 if (user) {
6470 gen_helper_set_user_reg(tcg_env, tcg_constant_i32(i), tmp);
6471 } else if (i == a->rn) {
6472 loaded_var = tmp;
6473 loaded_base = true;
6474 } else if (i == 15 && exc_return) {
6475 store_pc_exc_ret(s, tmp);
6476 } else {
6477 store_reg_from_load(s, i, tmp);
6478 }
6479
6480 /* No need to add after the last transfer. */
6481 if (++j != n) {
6482 tcg_gen_addi_i32(addr, addr, 4);
6483 }
6484 }
6485
6486 op_addr_block_post(s, a, addr, n);
6487
6488 if (loaded_base) {
6489 /* Note that we reject base == pc above. */
6490 store_reg(s, a->rn, loaded_var);
6491 }
6492
6493 if (exc_return) {
6494 /* Restore CPSR from SPSR. */
6495 tmp = load_cpu_field(spsr);
6496 translator_io_start(&s->base);
6497 gen_helper_cpsr_write_eret(tcg_env, tmp);
6498 /* Must exit loop to check un-masked IRQs */
6499 s->base.is_jmp = DISAS_EXIT;
6500 }
6501 clear_eci_state(s);
6502 return true;
6503 }
6504
6505 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
6506 {
6507 /*
6508 * Writeback register in register list is UNPREDICTABLE
6509 * for ArchVersion() >= 7. Prior to v7, A32 would write
6510 * an UNKNOWN value to the base register.
6511 */
6512 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
6513 unallocated_encoding(s);
6514 return true;
6515 }
6516 return do_ldm(s, a);
6517 }
6518
6519 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
6520 {
6521 /* Writeback register in register list is UNPREDICTABLE for T32. */
6522 if (a->w && (a->list & (1 << a->rn))) {
6523 unallocated_encoding(s);
6524 return true;
6525 }
6526 return do_ldm(s, a);
6527 }
6528
6529 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
6530 {
6531 /* Writeback is conditional on the base register not being loaded. */
6532 a->w = !(a->list & (1 << a->rn));
6533 return do_ldm(s, a);
6534 }
6535
6536 static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
6537 {
6538 int i;
6539 TCGv_i32 zero;
6540
6541 if (!dc_isar_feature(aa32_m_sec_state, s)) {
6542 return false;
6543 }
6544
6545 if (extract32(a->list, 13, 1)) {
6546 return false;
6547 }
6548
6549 if (!a->list) {
6550 /* UNPREDICTABLE; we choose to UNDEF */
6551 return false;
6552 }
6553
6554 s->eci_handled = true;
6555
6556 zero = tcg_constant_i32(0);
6557 for (i = 0; i < 15; i++) {
6558 if (extract32(a->list, i, 1)) {
6559 /* Clear R[i] */
6560 tcg_gen_mov_i32(cpu_R[i], zero);
6561 }
6562 }
6563 if (extract32(a->list, 15, 1)) {
6564 /*
6565 * Clear APSR (by calling the MSR helper with the same argument
6566 * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
6567 */
6568 gen_helper_v7m_msr(tcg_env, tcg_constant_i32(0xc00), zero);
6569 }
6570 clear_eci_state(s);
6571 return true;
6572 }
6573
6574 /*
6575 * Branch, branch with link
6576 */
6577
6578 static bool trans_B(DisasContext *s, arg_i *a)
6579 {
6580 gen_jmp(s, jmp_diff(s, a->imm));
6581 return true;
6582 }
6583
6584 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
6585 {
6586 /* This has cond from encoding, required to be outside IT block. */
6587 if (a->cond >= 0xe) {
6588 return false;
6589 }
6590 if (s->condexec_mask) {
6591 unallocated_encoding(s);
6592 return true;
6593 }
6594 arm_skip_unless(s, a->cond);
6595 gen_jmp(s, jmp_diff(s, a->imm));
6596 return true;
6597 }
6598
6599 static bool trans_BL(DisasContext *s, arg_i *a)
6600 {
6601 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6602 gen_jmp(s, jmp_diff(s, a->imm));
6603 return true;
6604 }
6605
6606 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
6607 {
6608 /*
6609 * BLX <imm> would be useless on M-profile; the encoding space
6610 * is used for other insns from v8.1M onward, and UNDEFs before that.
6611 */
6612 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6613 return false;
6614 }
6615
6616 /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
6617 if (s->thumb && (a->imm & 2)) {
6618 return false;
6619 }
6620 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6621 store_cpu_field_constant(!s->thumb, thumb);
6622 /* This jump is computed from an aligned PC: subtract off the low bits. */
6623 gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
6624 return true;
6625 }
6626
6627 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
6628 {
6629 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6630 gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12));
6631 return true;
6632 }
6633
6634 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
6635 {
6636 TCGv_i32 tmp = tcg_temp_new_i32();
6637
6638 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6639 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
6640 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6641 gen_bx(s, tmp);
6642 return true;
6643 }
6644
6645 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
6646 {
6647 TCGv_i32 tmp;
6648
6649 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6650 if (!ENABLE_ARCH_5) {
6651 return false;
6652 }
6653 tmp = tcg_temp_new_i32();
6654 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
6655 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6656 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6657 gen_bx(s, tmp);
6658 return true;
6659 }
6660
6661 static bool trans_BF(DisasContext *s, arg_BF *a)
6662 {
6663 /*
6664 * M-profile branch future insns. The architecture permits an
6665 * implementation to implement these as NOPs (equivalent to
6666 * discarding the LO_BRANCH_INFO cache immediately), and we
6667 * take that IMPDEF option because for QEMU a "real" implementation
6668 * would be complicated and wouldn't execute any faster.
6669 */
6670 if (!dc_isar_feature(aa32_lob, s)) {
6671 return false;
6672 }
6673 if (a->boff == 0) {
6674 /* SEE "Related encodings" (loop insns) */
6675 return false;
6676 }
6677 /* Handle as NOP */
6678 return true;
6679 }
6680
6681 static bool trans_DLS(DisasContext *s, arg_DLS *a)
6682 {
6683 /* M-profile low-overhead loop start */
6684 TCGv_i32 tmp;
6685
6686 if (!dc_isar_feature(aa32_lob, s)) {
6687 return false;
6688 }
6689 if (a->rn == 13 || a->rn == 15) {
6690 /*
6691 * For DLSTP rn == 15 is a related encoding (LCTP); the
6692 * other cases caught by this condition are all
6693 * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6694 */
6695 return false;
6696 }
6697
6698 if (a->size != 4) {
6699 /* DLSTP */
6700 if (!dc_isar_feature(aa32_mve, s)) {
6701 return false;
6702 }
6703 if (!vfp_access_check(s)) {
6704 return true;
6705 }
6706 }
6707
6708 /* Not a while loop: set LR to the count, and set LTPSIZE for DLSTP */
6709 tmp = load_reg(s, a->rn);
6710 store_reg(s, 14, tmp);
6711 if (a->size != 4) {
6712 /* DLSTP: set FPSCR.LTPSIZE */
6713 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6714 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6715 }
6716 return true;
6717 }
6718
6719 static bool trans_WLS(DisasContext *s, arg_WLS *a)
6720 {
6721 /* M-profile low-overhead while-loop start */
6722 TCGv_i32 tmp;
6723 DisasLabel nextlabel;
6724
6725 if (!dc_isar_feature(aa32_lob, s)) {
6726 return false;
6727 }
6728 if (a->rn == 13 || a->rn == 15) {
6729 /*
6730 * For WLSTP rn == 15 is a related encoding (LE); the
6731 * other cases caught by this condition are all
6732 * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6733 */
6734 return false;
6735 }
6736 if (s->condexec_mask) {
6737 /*
6738 * WLS in an IT block is CONSTRAINED UNPREDICTABLE;
6739 * we choose to UNDEF, because otherwise our use of
6740 * gen_goto_tb(1) would clash with the use of TB exit 1
6741 * in the dc->condjmp condition-failed codepath in
6742 * arm_tr_tb_stop() and we'd get an assertion.
6743 */
6744 return false;
6745 }
6746 if (a->size != 4) {
6747 /* WLSTP */
6748 if (!dc_isar_feature(aa32_mve, s)) {
6749 return false;
6750 }
6751 /*
6752 * We need to check that the FPU is enabled here, but mustn't
6753 * call vfp_access_check() to do that because we don't want to
6754 * do the lazy state preservation in the "loop count is zero" case.
6755 * Do the check-and-raise-exception by hand.
6756 */
6757 if (s->fp_excp_el) {
6758 gen_exception_insn_el(s, 0, EXCP_NOCP,
6759 syn_uncategorized(), s->fp_excp_el);
6760 return true;
6761 }
6762 }
6763
6764 nextlabel = gen_disas_label(s);
6765 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label);
6766 tmp = load_reg(s, a->rn);
6767 store_reg(s, 14, tmp);
6768 if (a->size != 4) {
6769 /*
6770 * WLSTP: set FPSCR.LTPSIZE. This requires that we do the
6771 * lazy state preservation, new FP context creation, etc,
6772 * that vfp_access_check() does. We know that the actual
6773 * access check will succeed (ie it won't generate code that
6774 * throws an exception) because we did that check by hand earlier.
6775 */
6776 bool ok = vfp_access_check(s);
6777 assert(ok);
6778 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6779 /*
6780 * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
6781 * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
6782 */
6783 }
6784 gen_jmp_tb(s, curr_insn_len(s), 1);
6785
6786 set_disas_label(s, nextlabel);
6787 gen_jmp(s, jmp_diff(s, a->imm));
6788 return true;
6789 }
6790
6791 static bool trans_LE(DisasContext *s, arg_LE *a)
6792 {
6793 /*
6794 * M-profile low-overhead loop end. The architecture permits an
6795 * implementation to discard the LO_BRANCH_INFO cache at any time,
6796 * and we take the IMPDEF option to never set it in the first place
6797 * (equivalent to always discarding it immediately), because for QEMU
6798 * a "real" implementation would be complicated and wouldn't execute
6799 * any faster.
6800 */
6801 TCGv_i32 tmp;
6802 DisasLabel loopend;
6803 bool fpu_active;
6804
6805 if (!dc_isar_feature(aa32_lob, s)) {
6806 return false;
6807 }
6808 if (a->f && a->tp) {
6809 return false;
6810 }
6811 if (s->condexec_mask) {
6812 /*
6813 * LE in an IT block is CONSTRAINED UNPREDICTABLE;
6814 * we choose to UNDEF, because otherwise our use of
6815 * gen_goto_tb(1) would clash with the use of TB exit 1
6816 * in the dc->condjmp condition-failed codepath in
6817 * arm_tr_tb_stop() and we'd get an assertion.
6818 */
6819 return false;
6820 }
6821 if (a->tp) {
6822 /* LETP */
6823 if (!dc_isar_feature(aa32_mve, s)) {
6824 return false;
6825 }
6826 if (!vfp_access_check(s)) {
6827 s->eci_handled = true;
6828 return true;
6829 }
6830 }
6831
6832 /* LE/LETP is OK with ECI set and leaves it untouched */
6833 s->eci_handled = true;
6834
6835 /*
6836 * With MVE, LTPSIZE might not be 4, and we must emit an INVSTATE
6837 * UsageFault exception for the LE insn in that case. Note that we
6838 * are not directly checking FPSCR.LTPSIZE but instead check the
6839 * pseudocode LTPSIZE() function, which returns 4 if the FPU is
6840 * not currently active (ie ActiveFPState() returns false). We
6841 * can identify not-active purely from our TB state flags, as the
6842 * FPU is active only if:
6843 * the FPU is enabled
6844 * AND lazy state preservation is not active
6845 * AND we do not need a new fp context (this is the ASPEN/FPCA check)
6846 *
6847 * Usually we don't need to care about this distinction between
6848 * LTPSIZE and FPSCR.LTPSIZE, because the code in vfp_access_check()
6849 * will either take an exception or clear the conditions that make
6850 * the FPU not active. But LE is an unusual case of a non-FP insn
6851 * that looks at LTPSIZE.
6852 */
6853 fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed;
6854
6855 if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
6856 /* Need to do a runtime check for LTPSIZE != 4 */
6857 DisasLabel skipexc = gen_disas_label(s);
6858 tmp = load_cpu_field(v7m.ltpsize);
6859 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
6860 gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
6861 set_disas_label(s, skipexc);
6862 }
6863
6864 if (a->f) {
6865 /* Loop-forever: just jump back to the loop start */
6866 gen_jmp(s, jmp_diff(s, -a->imm));
6867 return true;
6868 }
6869
6870 /*
6871 * Not loop-forever. If LR <= loop-decrement-value this is the last loop.
6872 * For LE, we know at this point that LTPSIZE must be 4 and the
6873 * loop decrement value is 1. For LETP we need to calculate the decrement
6874 * value from LTPSIZE.
6875 */
6876 loopend = gen_disas_label(s);
6877 if (!a->tp) {
6878 tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label);
6879 tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
6880 } else {
6881 /*
6882 * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local
6883 * so that decr stays live after the brcondi.
6884 */
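        /* e.g. LTPSIZE == 2 (32-bit elements) gives a decrement of 1 << 2 == 4. */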
6885 TCGv_i32 decr = tcg_temp_new_i32();
6886 TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize);
6887 tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize);
6888 tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
6889
6890 tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label);
6891
6892 tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
6893 }
6894 /* Jump back to the loop start */
6895 gen_jmp(s, jmp_diff(s, -a->imm));
6896
6897 set_disas_label(s, loopend);
6898 if (a->tp) {
6899 /* Exits from tail-pred loops must reset LTPSIZE to 4 */
6900 store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
6901 }
6902 /* End TB, continuing to following insn */
6903 gen_jmp_tb(s, curr_insn_len(s), 1);
6904 return true;
6905 }
6906
6907 static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
6908 {
6909 /*
6910 * M-profile Loop Clear with Tail Predication. Since our implementation
6911 * doesn't cache branch information, all we need to do is reset
6912 * FPSCR.LTPSIZE to 4.
6913 */
6914
6915 if (!dc_isar_feature(aa32_lob, s) ||
6916 !dc_isar_feature(aa32_mve, s)) {
6917 return false;
6918 }
6919
6920 if (!vfp_access_check(s)) {
6921 return true;
6922 }
6923
6924 store_cpu_field_constant(4, v7m.ltpsize);
6925 return true;
6926 }
6927
6928 static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
6929 {
6930 /*
6931 * M-profile Create Vector Tail Predicate. This insn is itself
6932 * predicated and is subject to beatwise execution.
6933 */
6934 TCGv_i32 rn_shifted, masklen;
6935
6936 if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) {
6937 return false;
6938 }
6939
6940 if (!mve_eci_check(s) || !vfp_access_check(s)) {
6941 return true;
6942 }
6943
6944 /*
6945 * We pre-calculate the mask length here to avoid having
6946 * to have multiple helpers specialized for size.
6947 * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16".
6948 */
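    /*
     * For example, with 32-bit elements (size == 2) and Rn == 3 the
     * helper is passed 12, while any Rn >= 4 yields the full 16.
     */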
6949 rn_shifted = tcg_temp_new_i32();
6950 masklen = load_reg(s, a->rn);
6951 tcg_gen_shli_i32(rn_shifted, masklen, a->size);
6952 tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
6953 masklen, tcg_constant_i32(1 << (4 - a->size)),
6954 rn_shifted, tcg_constant_i32(16));
6955 gen_helper_mve_vctp(tcg_env, masklen);
6956 /* This insn updates predication bits */
6957 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6958 mve_update_eci(s);
6959 return true;
6960 }
6961
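/*
 * Table Branch (TBB/TBH): load a byte or halfword offset from
 * [Rn + Rm] (TBB) or [Rn + 2 * Rm] (TBH) and branch forward by twice
 * that offset from the current PC (the insn address + 4).
 */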
6962 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
6963 {
6964 TCGv_i32 addr, tmp;
6965
6966 tmp = load_reg(s, a->rm);
6967 if (half) {
6968 tcg_gen_add_i32(tmp, tmp, tmp);
6969 }
6970 addr = load_reg(s, a->rn);
6971 tcg_gen_add_i32(addr, addr, tmp);
6972
6973 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
6974
6975 tcg_gen_add_i32(tmp, tmp, tmp);
6976 gen_pc_plus_diff(s, addr, jmp_diff(s, 0));
6977 tcg_gen_add_i32(tmp, tmp, addr);
6978 store_reg(s, 15, tmp);
6979 return true;
6980 }
6981
6982 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
6983 {
6984 return op_tbranch(s, a, false);
6985 }
6986
6987 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
6988 {
6989 return op_tbranch(s, a, true);
6990 }
6991
6992 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
6993 {
6994 TCGv_i32 tmp = load_reg(s, a->rn);
6995
6996 arm_gen_condlabel(s);
6997 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
6998 tmp, 0, s->condlabel.label);
6999 gen_jmp(s, jmp_diff(s, a->imm));
7000 return true;
7001 }
7002
7003 /*
7004 * Supervisor call - both T32 & A32 come here so we need to check
7005 * which mode we are in when checking for semihosting.
7006 */
7007
7008 static bool trans_SVC(DisasContext *s, arg_SVC *a)
7009 {
7010 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
7011
7012 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
7013 semihosting_enabled(s->current_el == 0) &&
7014 (a->imm == semihost_imm)) {
7015 gen_exception_internal_insn(s, EXCP_SEMIHOST);
7016 } else {
7017 if (s->fgt_svc) {
7018 uint32_t syndrome = syn_aa32_svc(a->imm, s->thumb);
7019 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
7020 } else {
7021 gen_update_pc(s, curr_insn_len(s));
7022 s->svc_imm = a->imm;
7023 s->base.is_jmp = DISAS_SWI;
7024 }
7025 }
7026 return true;
7027 }
7028
7029 /*
7030 * Unconditional system instructions
7031 */
7032
7033 static bool trans_RFE(DisasContext *s, arg_RFE *a)
7034 {
7035 static const int8_t pre_offset[4] = {
7036 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
7037 };
7038 static const int8_t post_offset[4] = {
7039 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
7040 };
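    /*
     * For example RFEIA Rn loads the PC from [Rn] and the CPSR from
     * [Rn + 4]; with writeback the final Rn is Rn + 8.
     */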
7041 TCGv_i32 addr, t1, t2;
7042
7043 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7044 return false;
7045 }
7046 if (IS_USER(s)) {
7047 unallocated_encoding(s);
7048 return true;
7049 }
7050
7051 addr = load_reg(s, a->rn);
7052 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
7053
7054 /* Load PC into tmp and CPSR into tmp2. */
7055 t1 = tcg_temp_new_i32();
7056 gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7057 tcg_gen_addi_i32(addr, addr, 4);
7058 t2 = tcg_temp_new_i32();
7059 gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7060
7061 if (a->w) {
7062 /* Base writeback. */
7063 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
7064 store_reg(s, a->rn, addr);
7065 }
7066 gen_rfe(s, t1, t2);
7067 return true;
7068 }
7069
7070 static bool trans_SRS(DisasContext *s, arg_SRS *a)
7071 {
7072 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7073 return false;
7074 }
7075 gen_srs(s, a->mode, a->pu, a->w);
7076 return true;
7077 }
7078
7079 static bool trans_CPS(DisasContext *s, arg_CPS *a)
7080 {
7081 uint32_t mask, val;
7082
7083 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7084 return false;
7085 }
7086 if (IS_USER(s)) {
7087 /* Implemented as NOP in user mode. */
7088 return true;
7089 }
7090 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
7091
7092 mask = val = 0;
7093 if (a->imod & 2) {
7094 if (a->A) {
7095 mask |= CPSR_A;
7096 }
7097 if (a->I) {
7098 mask |= CPSR_I;
7099 }
7100 if (a->F) {
7101 mask |= CPSR_F;
7102 }
7103 if (a->imod & 1) {
7104 val |= mask;
7105 }
7106 }
7107 if (a->M) {
7108 mask |= CPSR_M;
7109 val |= a->mode;
7110 }
7111 if (mask) {
7112 gen_set_psr_im(s, mask, 0, val);
7113 }
7114 return true;
7115 }
7116
7117 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
7118 {
7119 TCGv_i32 tmp, addr;
7120
7121 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
7122 return false;
7123 }
7124 if (IS_USER(s)) {
7125 /* Implemented as NOP in user mode. */
7126 return true;
7127 }
7128
7129 tmp = tcg_constant_i32(a->im);
7130 /* FAULTMASK */
7131 if (a->F) {
7132 addr = tcg_constant_i32(19);
7133 gen_helper_v7m_msr(tcg_env, addr, tmp);
7134 }
7135 /* PRIMASK */
7136 if (a->I) {
7137 addr = tcg_constant_i32(16);
7138 gen_helper_v7m_msr(tcg_env, addr, tmp);
7139 }
7140 gen_rebuild_hflags(s, false);
7141 gen_lookup_tb(s);
7142 return true;
7143 }
7144
7145 /*
7146 * Clear-Exclusive, Barriers
7147 */
7148
7149 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
7150 {
7151 if (s->thumb
7152 ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
7153 : !ENABLE_ARCH_6K) {
7154 return false;
7155 }
7156 gen_clrex(s);
7157 return true;
7158 }
7159
7160 static bool trans_DSB(DisasContext *s, arg_DSB *a)
7161 {
7162 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7163 return false;
7164 }
7165 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7166 return true;
7167 }
7168
7169 static bool trans_DMB(DisasContext *s, arg_DMB *a)
7170 {
7171 return trans_DSB(s, NULL);
7172 }
7173
7174 static bool trans_ISB(DisasContext *s, arg_ISB *a)
7175 {
7176 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7177 return false;
7178 }
7179 /*
7180 * We need to break the TB after this insn to execute
7181 * self-modifying code correctly and also to take
7182 * any pending interrupts immediately.
7183 */
7184 s->base.is_jmp = DISAS_TOO_MANY;
7185 return true;
7186 }
7187
7188 static bool trans_SB(DisasContext *s, arg_SB *a)
7189 {
7190 if (!dc_isar_feature(aa32_sb, s)) {
7191 return false;
7192 }
7193 /*
7194 * TODO: There is no speculation barrier opcode
7195 * for TCG; MB and end the TB instead.
7196 */
7197 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7198 s->base.is_jmp = DISAS_TOO_MANY;
7199 return true;
7200 }
7201
7202 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
7203 {
7204 if (!ENABLE_ARCH_6) {
7205 return false;
7206 }
7207 if (a->E != (s->be_data == MO_BE)) {
7208 gen_helper_setend(tcg_env);
7209 s->base.is_jmp = DISAS_UPDATE_EXIT;
7210 }
7211 return true;
7212 }
7213
7214 /*
7215 * Preload instructions
7216 * All are nops, contingent on the appropriate arch level.
7217 */
7218
7219 static bool trans_PLD(DisasContext *s, arg_PLD *a)
7220 {
7221 return ENABLE_ARCH_5TE;
7222 }
7223
7224 static bool trans_PLDW(DisasContext *s, arg_PLDW *a)
7225 {
7226 return arm_dc_feature(s, ARM_FEATURE_V7MP);
7227 }
7228
7229 static bool trans_PLI(DisasContext *s, arg_PLI *a)
7230 {
7231 return ENABLE_ARCH_7;
7232 }
7233
7234 /*
7235 * If-then
7236 */
7237
7238 static bool trans_IT(DisasContext *s, arg_IT *a)
7239 {
7240 int cond_mask = a->cond_mask;
7241
7242 /*
7243 * No actual code generated for this insn, just setup state.
7244 *
7245 * Combinations of firstcond and mask which set up an 0b1111
7246 * condition are UNPREDICTABLE; we take the CONSTRAINED
7247 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
7248 * i.e. both meaning "execute always".
7249 */
7250 s->condexec_cond = (cond_mask >> 4) & 0xe;
7251 s->condexec_mask = cond_mask & 0x1f;
7252 return true;
7253 }
7254
7255 /* v8.1M CSEL/CSINC/CSNEG/CSINV */
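/* Rd = Rn if <fcond> passes, otherwise Rm, Rm + 1, ~Rm or -Rm respectively. */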
7256 static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
7257 {
7258 TCGv_i32 rn, rm;
7259 DisasCompare c;
7260
7261 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
7262 return false;
7263 }
7264
7265 if (a->rm == 13) {
7266 /* SEE "Related encodings" (MVE shifts) */
7267 return false;
7268 }
7269
7270 if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) {
7271 /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
7272 return false;
7273 }
7274
7275 /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
7276 rn = tcg_temp_new_i32();
7277 rm = tcg_temp_new_i32();
7278 if (a->rn == 15) {
7279 tcg_gen_movi_i32(rn, 0);
7280 } else {
7281 load_reg_var(s, rn, a->rn);
7282 }
7283 if (a->rm == 15) {
7284 tcg_gen_movi_i32(rm, 0);
7285 } else {
7286 load_reg_var(s, rm, a->rm);
7287 }
7288
7289 switch (a->op) {
7290 case 0: /* CSEL */
7291 break;
7292 case 1: /* CSINC */
7293 tcg_gen_addi_i32(rm, rm, 1);
7294 break;
7295 case 2: /* CSINV */
7296 tcg_gen_not_i32(rm, rm);
7297 break;
7298 case 3: /* CSNEG */
7299 tcg_gen_neg_i32(rm, rm);
7300 break;
7301 default:
7302 g_assert_not_reached();
7303 }
7304
7305 arm_test_cc(&c, a->fcond);
7306 tcg_gen_movcond_i32(c.cond, rn, c.value, tcg_constant_i32(0), rn, rm);
7307
7308 store_reg(s, a->rd, rn);
7309 return true;
7310 }
7311
7312 /*
7313 * Legacy decoder.
7314 */
7315
7316 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7317 {
7318 unsigned int cond = insn >> 28;
7319
7320 /* M variants do not implement ARM mode; this must raise the INVSTATE
7321 * UsageFault exception.
7322 */
7323 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7324 gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
7325 return;
7326 }
7327
7328 if (s->pstate_il) {
7329 /*
7330 * Illegal execution state. This has priority over BTI
7331 * exceptions, but comes after instruction abort exceptions.
7332 */
7333 gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
7334 return;
7335 }
7336
7337 if (cond == 0xf) {
7338 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7339 * choose to UNDEF. In ARMv5 and above the space is used
7340 * for miscellaneous unconditional instructions.
7341 */
7342 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
7343 unallocated_encoding(s);
7344 return;
7345 }
7346
7347 /* Unconditional instructions. */
7348 /* TODO: Perhaps merge these into one decodetree output file. */
7349 if (disas_a32_uncond(s, insn) ||
7350 disas_vfp_uncond(s, insn) ||
7351 disas_neon_dp(s, insn) ||
7352 disas_neon_ls(s, insn) ||
7353 disas_neon_shared(s, insn)) {
7354 return;
7355 }
7356 /* fall back to legacy decoder */
7357
7358 if ((insn & 0x0e000f00) == 0x0c000100) {
7359 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7360 /* iWMMXt register transfer. */
7361 if (extract32(s->c15_cpar, 1, 1)) {
7362 if (!disas_iwmmxt_insn(s, insn)) {
7363 return;
7364 }
7365 }
7366 }
7367 }
7368 goto illegal_op;
7369 }
7370 if (cond != 0xe) {
7371 /* if not always execute, we generate a conditional jump to
7372 next instruction */
7373 arm_skip_unless(s, cond);
7374 }
7375
7376 /* TODO: Perhaps merge these into one decodetree output file. */
7377 if (disas_a32(s, insn) ||
7378 disas_vfp(s, insn)) {
7379 return;
7380 }
7381 /* fall back to legacy decoder */
7382 /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
7383 if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7384 if (((insn & 0x0c000e00) == 0x0c000000)
7385 && ((insn & 0x03000000) != 0x03000000)) {
7386 /* Coprocessor insn, coprocessor 0 or 1 */
7387 disas_xscale_insn(s, insn);
7388 return;
7389 }
7390 }
7391
7392 illegal_op:
7393 unallocated_encoding(s);
7394 }
7395
7396 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
7397 {
7398 /*
7399 * Return true if this is a 16 bit instruction. We must be precise
7400 * about this (matching the decode).
7401 */
7402 if ((insn >> 11) < 0x1d) {
7403 /* Definitely a 16-bit instruction */
7404 return true;
7405 }
7406
7407 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
7408 * first half of a 32-bit Thumb insn. Thumb-1 cores might
7409 * end up actually treating this as two 16-bit insns, though,
7410 * if it's half of a bl/blx pair that might span a page boundary.
7411 */
7412 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
7413 arm_dc_feature(s, ARM_FEATURE_M)) {
7414 /* Thumb2 cores (including all M profile ones) always treat
7415 * 32-bit insns as 32-bit.
7416 */
7417 return false;
7418 }
7419
7420 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
7421 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
7422 * is not on the next page; we merge this into a 32-bit
7423 * insn.
7424 */
7425 return false;
7426 }
7427 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
7428 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
7429 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
7430 * -- handle as single 16 bit insn
7431 */
7432 return true;
7433 }
7434
7435 /* Translate a 32-bit thumb instruction. */
7436 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
7437 {
7438 /*
7439 * ARMv6-M supports a limited subset of Thumb2 instructions.
7440 * Other Thumb1 architectures allow only 32-bit
7441 * combined BL/BLX prefix and suffix.
7442 */
7443 if (arm_dc_feature(s, ARM_FEATURE_M) &&
7444 !arm_dc_feature(s, ARM_FEATURE_V7)) {
7445 int i;
7446 bool found = false;
7447 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
7448 0xf3b08040 /* dsb */,
7449 0xf3b08050 /* dmb */,
7450 0xf3b08060 /* isb */,
7451 0xf3e08000 /* mrs */,
7452 0xf000d000 /* bl */};
7453 static const uint32_t armv6m_mask[] = {0xffe0d000,
7454 0xfff0d0f0,
7455 0xfff0d0f0,
7456 0xfff0d0f0,
7457 0xffe0d000,
7458 0xf800d000};
7459
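/* Accept the insn only if it matches one of the (value, mask) pairs above. */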
7460 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
7461 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
7462 found = true;
7463 break;
7464 }
7465 }
7466 if (!found) {
7467 goto illegal_op;
7468 }
7469 } else if ((insn & 0xf800e800) != 0xf000e800) {
7470 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
7471 unallocated_encoding(s);
7472 return;
7473 }
7474 }
7475
7476 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7477 /*
7478 * NOCP takes precedence over any UNDEF for (almost) the
7479 * entire wide range of coprocessor-space encodings, so check
7480 * for it first before proceeding to actually decode eg VFP
7481 * insns. This decode also handles the few insns which are
7482 * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
7483 */
7484 if (disas_m_nocp(s, insn)) {
7485 return;
7486 }
7487 }
7488
7489 if ((insn & 0xef000000) == 0xef000000) {
7490 /*
7491 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7492 * transform into
7493 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7494 */
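/*
 * That is: move the T32 'p' bit (bit 28) down to A32 bit 24, clear
 * bits [27:26] so that A32 bits [27:25] read 0b001, and force the
 * top nibble to the A32 "unconditional" value 0b1111.
 */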
7495 uint32_t a32_insn = (insn & 0xe2ffffff) |
7496 ((insn & (1 << 28)) >> 4) | (1 << 28);
7497
7498 if (disas_neon_dp(s, a32_insn)) {
7499 return;
7500 }
7501 }
7502
7503 if ((insn & 0xff100000) == 0xf9000000) {
7504 /*
7505 * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
7506 * transform into
7507 * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
7508 */
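/* That is: keep the low 24 bits and swap the T32 0xf9 prefix byte for the A32 0xf4 prefix. */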
7509 uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
7510
7511 if (disas_neon_ls(s, a32_insn)) {
7512 return;
7513 }
7514 }
7515
7516 /*
7517 * TODO: Perhaps merge these into one decodetree output file.
7518 * Note disas_vfp is written for a32 with cond field in the
7519 * top nibble. The t32 encoding requires 0xe in the top nibble.
7520 */
7521 if (disas_t32(s, insn) ||
7522 disas_vfp_uncond(s, insn) ||
7523 disas_neon_shared(s, insn) ||
7524 disas_mve(s, insn) ||
7525 ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
7526 return;
7527 }
7528
7529 illegal_op:
7530 unallocated_encoding(s);
7531 }
7532
7533 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
7534 {
7535 if (!disas_t16(s, insn)) {
7536 unallocated_encoding(s);
7537 }
7538 }
7539
7540 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
7541 {
7542 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
7543 * (False positives are OK, false negatives are not.)
7544 * We know this is a Thumb insn, and our caller ensures we are
7545 * only called if dc->base.pc_next is less than 4 bytes from the page
7546 * boundary, so we cross the page if the first 16 bits indicate
7547 * that this is a 32 bit insn.
7548 */
7549 uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);
7550
7551 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
7552 }
7553
7554 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
7555 {
7556 DisasContext *dc = container_of(dcbase, DisasContext, base);
7557 CPUARMState *env = cpu_env(cs);
7558 ARMCPU *cpu = env_archcpu(env);
7559 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
7560 uint32_t condexec, core_mmu_idx;
7561
7562 dc->isar = &cpu->isar;
7563 dc->condjmp = 0;
7564 dc->pc_save = dc->base.pc_first;
7565 dc->aarch64 = false;
7566 dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
7567 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
7568 condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
7569 /*
7570 * the CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this
7571 * is always the IT bits. On M-profile, some of the reserved encodings
7572 * of IT are used instead to indicate either ICI or ECI, which
7573 * indicate partial progress of a restartable insn that was interrupted
7574 * partway through by an exception:
7575 * * if CONDEXEC[3:0] != 0b0000 : CONDEXEC is IT bits
7576 * * if CONDEXEC[3:0] == 0b0000 : CONDEXEC is ICI or ECI bits
7577 * In all cases CONDEXEC == 0 means "not in IT block or restartable
7578 * insn, behave normally".
7579 */
7580 dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
7581 dc->eci_handled = false;
7582 if (condexec & 0xf) {
7583 dc->condexec_mask = (condexec & 0xf) << 1;
7584 dc->condexec_cond = condexec >> 4;
7585 } else {
7586 if (arm_feature(env, ARM_FEATURE_M)) {
7587 dc->eci = condexec >> 4;
7588 }
7589 }
7590
7591 core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
7592 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
7593 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
7594 #if !defined(CONFIG_USER_ONLY)
7595 dc->user = (dc->current_el == 0);
7596 #endif
7597 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
7598 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
7599 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
7600 dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
7601 dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
7602
7603 if (arm_feature(env, ARM_FEATURE_M)) {
7604 dc->vfp_enabled = 1;
7605 dc->be_data = MO_TE;
7606 dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
7607 dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);
7608 dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
7609 dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
7610 dc->v7m_new_fp_ctxt_needed =
7611 EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
7612 dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
7613 dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
7614 } else {
7615 dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
7616 dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
7617 dc->ns = EX_TBFLAG_A32(tb_flags, NS);
7618 dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
7619 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7620 dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
7621 } else {
7622 dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
7623 dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
7624 }
7625 dc->sme_trap_nonstreaming =
7626 EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
7627 }
7628 dc->lse2 = false; /* applies only to aarch64 */
7629 dc->cp_regs = cpu->cp_regs;
7630 dc->features = env->features;
7631
7632 /* Single step state. The code-generation logic here is:
7633 * SS_ACTIVE == 0:
7634 * generate code with no special handling for single-stepping (except
7635 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
7636 * this happens anyway because those changes are all system register or
7637 * PSTATE writes).
7638 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
7639 * emit code for one insn
7640 * emit code to clear PSTATE.SS
7641 * emit code to generate software step exception for completed step
7642 * end TB (as usual for having generated an exception)
7643 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
7644 * emit code to generate a software step exception
7645 * end the TB
7646 */
7647 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
7648 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
7649 dc->is_ldex = false;
7650
7651 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
7652
7653 /* If architectural single step active, limit to 1. */
7654 if (dc->ss_active) {
7655 dc->base.max_insns = 1;
7656 }
7657
7658 /* ARM is a fixed-length ISA. Bound the number of insns to execute
7659 to those left on the page. */
7660 if (!dc->thumb) {
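/* -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on this page. */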
7661 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
7662 dc->base.max_insns = MIN(dc->base.max_insns, bound);
7663 }
7664
7665 cpu_V0 = tcg_temp_new_i64();
7666 cpu_V1 = tcg_temp_new_i64();
7667 cpu_M0 = tcg_temp_new_i64();
7668 }
7669
7670 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
7671 {
7672 DisasContext *dc = container_of(dcbase, DisasContext, base);
7673
7674 /* A note on handling of the condexec (IT) bits:
7675 *
7676 * We want to avoid the overhead of having to write the updated condexec
7677 * bits back to the CPUARMState for every instruction in an IT block. So:
7678 * (1) if the condexec bits are not already zero then we write
7679 * zero back into the CPUARMState now. This avoids complications trying
7680 * to do it at the end of the block. (For example if we don't do this
7681 * it's hard to identify whether we can safely skip writing condexec
7682 * at the end of the TB, which we definitely want to do for the case
7683 * where a TB doesn't do anything with the IT state at all.)
7684 * (2) if we are going to leave the TB then we call gen_set_condexec()
7685 * which will write the correct value into CPUARMState if zero is wrong.
7686 * This is done both for leaving the TB at the end, and for leaving
7687 * it because of an exception we know will happen, which is done in
7688 * gen_exception_insn(). The latter is necessary because we need to
7689 * leave the TB with the PC/IT state just prior to execution of the
7690 * instruction which caused the exception.
7691 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
7692 * then the CPUARMState will be wrong and we need to reset it.
7693 * This is handled in the same way as restoration of the
7694 * PC in these situations; we save the value of the condexec bits
7695 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
7696 * then uses this to restore them after an exception.
7697 *
7698 * Note that there are no instructions which can read the condexec
7699 * bits, and none which can write non-static values to them, so
7700 * we don't need to care about whether CPUARMState is correct in the
7701 * middle of a TB.
7702 */
7703
7704 /* Reset the conditional execution bits immediately. This avoids
7705 complications trying to do it at the end of the block. */
7706 if (dc->condexec_mask || dc->condexec_cond) {
7707 store_cpu_field_constant(0, condexec_bits);
7708 }
7709 }
7710
7711 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
7712 {
7713 DisasContext *dc = container_of(dcbase, DisasContext, base);
7714 /*
7715 * The ECI/ICI bits share PSR bits with the IT bits, so we
7716 * need to reconstitute the bits from the split-out DisasContext
7717 * fields here.
7718 */
7719 uint32_t condexec_bits;
7720 target_ulong pc_arg = dc->base.pc_next;
7721
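/*
 * With CF_PCREL the TB may execute at a different virtual address,
 * so record only the page offset of the PC here.
 */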
7722 if (tb_cflags(dcbase->tb) & CF_PCREL) {
7723 pc_arg &= ~TARGET_PAGE_MASK;
7724 }
7725 if (dc->eci) {
7726 condexec_bits = dc->eci << 4;
7727 } else {
7728 condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
7729 }
7730 tcg_gen_insn_start(pc_arg, condexec_bits, 0);
7731 dc->insn_start_updated = false;
7732 }
7733
7734 static bool arm_check_kernelpage(DisasContext *dc)
7735 {
7736 #ifdef CONFIG_USER_ONLY
7737 /* Intercept jump to the magic kernel page. */
7738 if (dc->base.pc_next >= 0xffff0000) {
7739 /* We always get here via a jump, so we know we are not in a
7740 conditional execution block. */
7741 gen_exception_internal(EXCP_KERNEL_TRAP);
7742 dc->base.is_jmp = DISAS_NORETURN;
7743 return true;
7744 }
7745 #endif
7746 return false;
7747 }
7748
7749 static bool arm_check_ss_active(DisasContext *dc)
7750 {
7751 if (dc->ss_active && !dc->pstate_ss) {
7752 /* Singlestep state is Active-pending.
7753 * If we're in this state at the start of a TB then either
7754 * a) we just took an exception to an EL which is being debugged
7755 * and this is the first insn in the exception handler
7756 * b) debug exceptions were masked and we just unmasked them
7757 * without changing EL (eg by clearing PSTATE.D)
7758 * In either case we're going to take a swstep exception in the
7759 * "did not step an insn" case, and so the syndrome ISV and EX
7760 * bits should be zero.
7761 */
7762 assert(dc->base.num_insns == 1);
7763 gen_swstep_exception(dc, 0, 0);
7764 dc->base.is_jmp = DISAS_NORETURN;
7765 return true;
7766 }
7767
7768 return false;
7769 }
7770
7771 static void arm_post_translate_insn(DisasContext *dc)
7772 {
7773 if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
7774 if (dc->pc_save != dc->condlabel.pc_save) {
7775 gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
7776 }
7777 gen_set_label(dc->condlabel.label);
7778 dc->condjmp = 0;
7779 }
7780 }
7781
7782 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7783 {
7784 DisasContext *dc = container_of(dcbase, DisasContext, base);
7785 CPUARMState *env = cpu_env(cpu);
7786 uint32_t pc = dc->base.pc_next;
7787 unsigned int insn;
7788
7789 /* Singlestep exceptions have the highest priority. */
7790 if (arm_check_ss_active(dc)) {
7791 dc->base.pc_next = pc + 4;
7792 return;
7793 }
7794
7795 if (pc & 3) {
7796 /*
7797 * PC alignment fault. This has priority over the instruction abort
7798 * that we would receive from a translation fault via arm_ldl_code
7799 * (or the execution of the kernelpage entrypoint). This should only
7800 * be possible after an indirect branch, at the start of the TB.
7801 */
7802 assert(dc->base.num_insns == 1);
7803 gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
7804 dc->base.is_jmp = DISAS_NORETURN;
7805 dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
7806 return;
7807 }
7808
7809 if (arm_check_kernelpage(dc)) {
7810 dc->base.pc_next = pc + 4;
7811 return;
7812 }
7813
7814 dc->pc_curr = pc;
7815 insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b);
7816 dc->insn = insn;
7817 dc->base.pc_next = pc + 4;
7818 disas_arm_insn(dc, insn);
7819
7820 arm_post_translate_insn(dc);
7821
7822 /* ARM is a fixed-length ISA. We performed the cross-page check
7823 in init_disas_context by adjusting max_insns. */
7824 }
7825
7826 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
7827 {
7828 /* Return true if this Thumb insn is always unconditional,
7829 * even inside an IT block. This is true of only a very few
7830 * instructions: BKPT, HLT, and SG.
7831 *
7832 * A larger class of instructions are UNPREDICTABLE if used
7833 * inside an IT block; we do not need to detect those here, because
7834 * what we do by default (perform the cc check and update the IT
7835 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
7836 * choice for those situations.
7837 *
7838 * insn is either a 16-bit or a 32-bit instruction; the two are
7839 * distinguishable because for the 16-bit case the top 16 bits
7840 * are zeroes, and that isn't a valid 32-bit encoding.
7841 */
7842 if ((insn & 0xffffff00) == 0xbe00) {
7843 /* BKPT */
7844 return true;
7845 }
7846
7847 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
7848 !arm_dc_feature(s, ARM_FEATURE_M)) {
7849 /* HLT: v8A only. This is unconditional even when it is going to
7850 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
7851 * For v7 cores this was a plain old undefined encoding and so
7852 * honours its cc check. (We might be using the encoding as
7853 * a semihosting trap, but we don't change the cc check behaviour
7854 * on that account, because a debugger connected to a real v7A
7855 * core and emulating semihosting traps by catching the UNDEF
7856 * exception would also only see cases where the cc check passed.
7857 * No guest code should be trying to do a HLT semihosting trap
7858 * in an IT block anyway.
7859 */
7860 return true;
7861 }
7862
7863 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
7864 arm_dc_feature(s, ARM_FEATURE_M)) {
7865 /* SG: v8M only */
7866 return true;
7867 }
7868
7869 return false;
7870 }
7871
7872 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7873 {
7874 DisasContext *dc = container_of(dcbase, DisasContext, base);
7875 CPUARMState *env = cpu_env(cpu);
7876 uint32_t pc = dc->base.pc_next;
7877 uint32_t insn;
7878 bool is_16bit;
7879 /* TCG op to rewind to if this turns out to be an invalid ECI state */
7880 TCGOp *insn_eci_rewind = NULL;
7881 target_ulong insn_eci_pc_save = -1;
7882
7883 /* Misaligned thumb PC is architecturally impossible. */
7884 assert((dc->base.pc_next & 1) == 0);
7885
7886 if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
7887 dc->base.pc_next = pc + 2;
7888 return;
7889 }
7890
7891 dc->pc_curr = pc;
7892 insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
7893 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
7894 pc += 2;
7895 if (!is_16bit) {
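/* The first halfword of a 32-bit insn goes in the high half, as the T32 decoders expect. */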
7896 uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
7897 insn = insn << 16 | insn2;
7898 pc += 2;
7899 }
7900 dc->base.pc_next = pc;
7901 dc->insn = insn;
7902
7903 if (dc->pstate_il) {
7904 /*
7905 * Illegal execution state. This has priority over BTI
7906 * exceptions, but comes after instruction abort exceptions.
7907 */
7908 gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate());
7909 return;
7910 }
7911
7912 if (dc->eci) {
7913 /*
7914 * For M-profile continuable instructions, ECI/ICI handling
7915 * falls into these cases:
7916 * - interrupt-continuable instructions
7917 * These are the various load/store multiple insns (both
7918 * integer and fp). The ICI bits indicate the register
7919 * where the load/store can resume. We make the IMPDEF
7920 * choice to always do "instruction restart", ie ignore
7921 * the ICI value and always execute the ldm/stm from the
7922 * start. So all we need to do is zero PSR.ICI if the
7923 * insn executes.
7924 * - MVE instructions subject to beat-wise execution
7925 * Here the ECI bits indicate which beats have already been
7926 * executed, and we must honour this. Each insn of this
7927 * type will handle it correctly. We will update PSR.ECI
7928 * in the helper function for the insn (some ECI values
7929 * mean that the following insn also has been partially
7930 * executed).
7931 * - Special cases which don't advance ECI
7932 * The insns LE, LETP and BKPT leave the ECI/ICI state
7933 * bits untouched.
7934 * - all other insns (the common case)
7935 * Non-zero ECI/ICI means an INVSTATE UsageFault.
7936 * We place a rewind-marker here. Insns in the previous
7937 * three categories will set a flag in the DisasContext.
7938 * If the flag isn't set after we call disas_thumb_insn()
7939 * or disas_thumb2_insn() then we know we have a "some other
7940 * insn" case. We will rewind to the marker (ie throwing away
7941 * all the generated code) and instead emit "take exception".
7942 */
7943 insn_eci_rewind = tcg_last_op();
7944 insn_eci_pc_save = dc->pc_save;
7945 }
7946
7947 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
7948 uint32_t cond = dc->condexec_cond;
7949
7950 /*
7951 * Conditionally skip the insn. Note that both 0xe and 0xf mean
7952 * "always"; 0xf is not "never".
7953 */
7954 if (cond < 0x0e) {
7955 arm_skip_unless(dc, cond);
7956 }
7957 }
7958
7959 if (is_16bit) {
7960 disas_thumb_insn(dc, insn);
7961 } else {
7962 disas_thumb2_insn(dc, insn);
7963 }
7964
7965 /* Advance the Thumb condexec condition. */
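/*
 * This broadly mirrors the ITAdvance() pseudocode: the low bit of the
 * next condition is shifted in from the stored mask, the mask shifts
 * up, and the IT block is finished once the mask reaches zero.
 */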
7966 if (dc->condexec_mask) {
7967 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
7968 ((dc->condexec_mask >> 4) & 1));
7969 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
7970 if (dc->condexec_mask == 0) {
7971 dc->condexec_cond = 0;
7972 }
7973 }
7974
7975 if (dc->eci && !dc->eci_handled) {
7976 /*
7977 * Insn wasn't valid for ECI/ICI at all: undo what we
7978 * just generated and instead emit an exception
7979 */
7980 tcg_remove_ops_after(insn_eci_rewind);
7981 dc->pc_save = insn_eci_pc_save;
7982 dc->condjmp = 0;
7983 gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
7984 }
7985
7986 arm_post_translate_insn(dc);
7987
7988 /* Thumb is a variable-length ISA. Stop translation when the next insn
7989 * will touch a new page. This ensures that prefetch aborts occur at
7990 * the right place.
7991 *
7992 * We want to stop the TB if the next insn starts in a new page,
7993 * or if it spans between this page and the next. This means that
7994 * if we're looking at the last halfword in the page we need to
7995 * see if it's a 16-bit Thumb insn (which will fit in this TB)
7996 * or a 32-bit Thumb insn (which won't).
7997 * This is to avoid generating a silly TB with a single 16-bit insn
7998 * in it at the end of this page (which would execute correctly
7999 * but isn't very efficient).
8000 */
8001 if (dc->base.is_jmp == DISAS_NEXT
8002 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
8003 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
8004 && insn_crosses_page(env, dc)))) {
8005 dc->base.is_jmp = DISAS_TOO_MANY;
8006 }
8007 }
8008
8009 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
8010 {
8011 DisasContext *dc = container_of(dcbase, DisasContext, base);
8012
8013 /* At this stage dc->condjmp will only be set when the skipped
8014 instruction was a conditional branch or trap, and the PC has
8015 already been written. */
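/* Write the real IT state back if the zero stored at TB start was wrong. */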
8016 gen_set_condexec(dc);
8017 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
8018 /* Exception return branches need some special case code at the
8019 * end of the TB, which is complex enough that it has to
8020 * handle the single-step vs not and the condition-failed
8021 * insn codepath itself.
8022 */
8023 gen_bx_excret_final_code(dc);
8024 } else if (unlikely(dc->ss_active)) {
8025 /* Unconditional and "condition passed" instruction codepath. */
8026 switch (dc->base.is_jmp) {
8027 case DISAS_SWI:
8028 gen_ss_advance(dc);
8029 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
8030 break;
8031 case DISAS_HVC:
8032 gen_ss_advance(dc);
8033 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
8034 break;
8035 case DISAS_SMC:
8036 gen_ss_advance(dc);
8037 gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
8038 break;
8039 case DISAS_NEXT:
8040 case DISAS_TOO_MANY:
8041 case DISAS_UPDATE_EXIT:
8042 case DISAS_UPDATE_NOCHAIN:
8043 gen_update_pc(dc, curr_insn_len(dc));
8044 /* fall through */
8045 default:
8046 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
8047 gen_singlestep_exception(dc);
8048 break;
8049 case DISAS_NORETURN:
8050 break;
8051 }
8052 } else {
8053 /* While branches must always occur at the end of an IT block,
8054 there are a few other things that can cause us to terminate
8055 the TB in the middle of an IT block:
8056 - Exception generating instructions (bkpt, swi, undefined).
8057 - Page boundaries.
8058 - Hardware watchpoints.
8059 Hardware breakpoints have already been handled and skip this code.
8060 */
8061 switch (dc->base.is_jmp) {
8062 case DISAS_NEXT:
8063 case DISAS_TOO_MANY:
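/* Static fall-through to the next insn: chain directly to that TB. */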
8064 gen_goto_tb(dc, 1, curr_insn_len(dc));
8065 break;
8066 case DISAS_UPDATE_NOCHAIN:
8067 gen_update_pc(dc, curr_insn_len(dc));
8068 /* fall through */
8069 case DISAS_JUMP:
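/* PC is already up to date; jump via the indirect TB lookup helper. */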
8070 gen_goto_ptr();
8071 break;
8072 case DISAS_UPDATE_EXIT:
8073 gen_update_pc(dc, curr_insn_len(dc));
8074 /* fall through */
8075 default:
8076 /* indicate that the hash table must be used to find the next TB */
8077 tcg_gen_exit_tb(NULL, 0);
8078 break;
8079 case DISAS_NORETURN:
8080 /* nothing more to generate */
8081 break;
8082 case DISAS_WFI:
8083 gen_helper_wfi(tcg_env, tcg_constant_i32(curr_insn_len(dc)));
8084 /*
8085 * The helper doesn't necessarily throw an exception, but we
8086 * must go back to the main loop to check for interrupts anyway.
8087 */
8088 tcg_gen_exit_tb(NULL, 0);
8089 break;
8090 case DISAS_WFE:
8091 gen_helper_wfe(tcg_env);
8092 break;
8093 case DISAS_YIELD:
8094 gen_helper_yield(tcg_env);
8095 break;
8096 case DISAS_SWI:
8097 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
8098 break;
8099 case DISAS_HVC:
8100 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
8101 break;
8102 case DISAS_SMC:
8103 gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
8104 break;
8105 }
8106 }
8107
8108 if (dc->condjmp) {
8109 /* "Condition failed" instruction codepath for the branch/trap insn */
8110 set_disas_label(dc, dc->condlabel);
8111 gen_set_condexec(dc);
8112 if (unlikely(dc->ss_active)) {
8113 gen_update_pc(dc, curr_insn_len(dc));
8114 gen_singlestep_exception(dc);
8115 } else {
8116 gen_goto_tb(dc, 1, curr_insn_len(dc));
8117 }
8118 }
8119 }
8120
8121 static const TranslatorOps arm_translator_ops = {
8122 .init_disas_context = arm_tr_init_disas_context,
8123 .tb_start = arm_tr_tb_start,
8124 .insn_start = arm_tr_insn_start,
8125 .translate_insn = arm_tr_translate_insn,
8126 .tb_stop = arm_tr_tb_stop,
8127 };
8128
8129 static const TranslatorOps thumb_translator_ops = {
8130 .init_disas_context = arm_tr_init_disas_context,
8131 .tb_start = arm_tr_tb_start,
8132 .insn_start = arm_tr_insn_start,
8133 .translate_insn = thumb_tr_translate_insn,
8134 .tb_stop = arm_tr_tb_stop,
8135 };
8136
8137 /* generate intermediate code for basic block 'tb'. */
8138 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
8139 vaddr pc, void *host_pc)
8140 {
8141 DisasContext dc = { };
8142 const TranslatorOps *ops = &arm_translator_ops;
8143 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);
8144
8145 if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
8146 ops = &thumb_translator_ops;
8147 }
8148 #ifdef TARGET_AARCH64
8149 if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
8150 ops = &aarch64_translator_ops;
8151 }
8152 #endif
8153
8154 translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
8155 }
8156