/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
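
/*
 * Example (illustrative only): EXTRACT_FIELD(0xABCD, 4, 7) yields 0xC,
 * i.e. bits [7:4] of the source value, inclusive of both endpoints.
 */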

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
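
/*
 * Worked example: after "imm 0x1234", ext_imm holds 0x12340000 and
 * IMM_FLAG is set, so a following "addik rD, rA, 0x5678" sees the
 * combined 32-bit immediate 0x12345678.  Without a preceding imm,
 * the bare 16-bit immediate is used (sign-extended, per the
 * MicroBlaze ISA).
 */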

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    return tcg_constant_i32(0);
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    return tcg_temp_new_i32();
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
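
/*
 * Example (illustrative only): bsefi with s=4, w=8 extracts bits [11:4]
 * of rA into the low bits of rD, zero-extended.  The undefined cases
 * above (w == 0, or the field running past bit 31) are logged and
 * leave rD unmodified.
 */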

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}
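
/*
 * For cmp/cmpu above, rD = rB - rA, except that bit 31 is replaced by
 * the signed (resp. unsigned) comparison rB < rA, so the sign bit of
 * rD reliably answers "rA > rB?" even when the subtraction overflows.
 */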

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

ENV_WRAPPER3(gen_idiv, gen_helper_divs)
ENV_WRAPPER3(gen_idivu, gen_helper_divu)

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}
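
/*
 * In gen_src above, extract2 reads 32 bits at offset 1 from the 64-bit
 * concatenation tmp:ina, i.e. out = (old_carry << 31) | (ina >> 1):
 * a right shift through carry, with the bit shifted out becoming the
 * new MSR[C].
 */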

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret;

    /* If either register is r0, set ret to the value of the other reg.  */
    if (ra && rb) {
        ret = tcg_temp_new_i32();
        tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]);
    } else if (ra) {
        ret = cpu_R[ra];
    } else if (rb) {
        ret = cpu_R[rb];
    } else {
        ret = tcg_constant_i32(0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret;

    /* If ra is r0 or the immediate is 0, set ret to the other operand.  */
    if (ra && imm) {
        ret = tcg_temp_new_i32();
        tcg_gen_addi_i32(ret, cpu_R[ra], imm);
    } else if (ra) {
        ret = cpu_R[ra];
    } else {
        ret = tcg_constant_i32(imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv_i64 ret = tcg_temp_new_i64();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_i64(ret, cpu_R[rb]);
        } else {
            return tcg_constant_i64(0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_i64(ret, cpu_R[ra]);
            tcg_gen_shli_i64(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
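
/*
 * For the extended-address forms, rA supplies the high 32 bits and rB
 * the low 32 bits of the (up to) 64-bit address, truncated to
 * cfg->addr_size bits.
 */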
#endif

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}

static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
                                   int rd, MemOp size, bool store)
{
    if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
        TCGLabel *over = gen_new_label();

        record_unaligned_ess(dc, rd, size, store);

        tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
        gen_helper_unaligned_access(tcg_env, ea);
        gen_set_label(over);
    }
}
#endif

static inline MemOp mo_endian(DisasContext *dc)
{
    return dc->cfg->endi ? MO_LE : MO_BE;
}

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
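
/*
 * Example of the reverse-access fixup (illustrative only): for lhur,
 * the xor with (3 - MO_16) = 2 selects the other halfword of the same
 * aligned 32-bit word, and MO_BSWAP flips the byte lanes; together
 * these implement the "halfword reversed" access.
 */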

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
    (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
        (reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
    (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
        (reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
    (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
        (tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
    (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
        (tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
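
/*
 * The lwx/swx pair above emulates load-linked/store-conditional with a
 * compare-and-swap: swx succeeds (MSR[C] = 0) only if the address still
 * matches cpu_res_addr and memory still holds the value lwx observed
 * (cpu_res_val).  This admits ABA cases a real hardware reservation
 * would reject, but is the usual TCG approximation.
 */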

static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
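
/*
 * The movcond above folds the conditional branch into a select:
 * btarget = cond(rA, 0) ? taken-target : pc of the insn after the
 * (optional) delay slot, so the TB-ending code only ever has to
 * jump to btarget.
 */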

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
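
/*
 * Since cpu_msr_c only ever holds 0 or 1, multiplying it by
 * (MSR_C | MSR_CC) broadcasts the carry into both the architectural
 * carry bit and its copy in a single TCG op.
 */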

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
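
/*
 * In the three return helpers above, the saved copies MSR[UMS]/MSR[VMS]
 * sit one bit above MSR[UM]/MSR[VM] (cf. the "<< 1" in trans_brki), so
 * "msr >> 1" masked with (MSR_VM | MSR_UM) recovers the saved bits.
 * Each variant additionally re-enables the matching source (IE for
 * interrupts, EE for exceptions) or clears the matching in-progress
 * flag (BIP, EIP).
 */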
1546 
1547 /* Insns connected to FSL or AXI stream attached devices.  */
do_get(DisasContext * dc,int rd,int rb,int imm,int ctrl)1548 static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
1549 {
1550     TCGv_i32 t_id, t_ctrl;
1551 
1552     if (trap_userspace(dc, true)) {
1553         return true;
1554     }
1555 
1556     t_id = tcg_temp_new_i32();
1557     if (rb) {
1558         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
1559     } else {
1560         tcg_gen_movi_i32(t_id, imm);
1561     }
1562 
1563     t_ctrl = tcg_constant_i32(ctrl);
1564     gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
1565     return true;
1566 }
1567 
trans_get(DisasContext * dc,arg_get * arg)1568 static bool trans_get(DisasContext *dc, arg_get *arg)
1569 {
1570     return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
1571 }
1572 
trans_getd(DisasContext * dc,arg_getd * arg)1573 static bool trans_getd(DisasContext *dc, arg_getd *arg)
1574 {
1575     return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
1576 }
1577 
do_put(DisasContext * dc,int ra,int rb,int imm,int ctrl)1578 static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
1579 {
1580     TCGv_i32 t_id, t_ctrl;
1581 
1582     if (trap_userspace(dc, true)) {
1583         return true;
1584     }
1585 
1586     t_id = tcg_temp_new_i32();
1587     if (rb) {
1588         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
1589     } else {
1590         tcg_gen_movi_i32(t_id, imm);
1591     }
1592 
1593     t_ctrl = tcg_constant_i32(ctrl);
1594     gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
1595     return true;
1596 }
1597 
trans_put(DisasContext * dc,arg_put * arg)1598 static bool trans_put(DisasContext *dc, arg_put *arg)
1599 {
1600     return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
1601 }
1602 
trans_putd(DisasContext * dc,arg_putd * arg)1603 static bool trans_putd(DisasContext *dc, arg_putd *arg)
1604 {
1605     return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
1606 }
1607 
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

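    /*
     * Each insn is 4 bytes; bound the TB to the insns remaining in
     * the current target page, so it never crosses a page boundary.
     */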
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

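    /* Record pc and iflags so that unwinding can restore exact insn state. */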
    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

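    /*
     * Flags that this insn arms for its successor (imm prefix, delay
     * slot, rt* state) accumulate in tb_flags_to_set and are merged
     * into tb_flags below, after the old one-shot flags are cleared.
     */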
    dc->tb_flags_to_set = 0;

    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

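    /*
     * Finish the TB according to is_jmp: DISAS_TOO_MANY chains
     * straight to the next sequential TB; the DISAS_EXIT_* cases fix
     * up pc as needed and fall through to exit_tb below; DISAS_JUMP
     * uses goto_tb for direct branches when permitted, otherwise a
     * dynamic lookup of the branch target.
     */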
    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};

void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1883