xref: /openbmc/qemu/target/microblaze/translate.c (revision 67abc3dd)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "qemu/qemu-print.h"
31 
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34 
/*
 * Extract the inclusive bit field [start, end] from src (little-endian
 * bit numbering).  All macro parameters are fully parenthesized so that
 * expression arguments (e.g. "base + 1") expand correctly; the original
 * form applied "-" and "+" to unparenthesized start/end, corrupting the
 * width mask for such arguments.  Field width must be < 32 since the
 * mask is built with a 32-bit shift.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
37 
38 /* is_jmp field values */
39 #define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
40 #define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
41 
42 static TCGv_i32 cpu_R[32];
43 static TCGv_i32 cpu_pc;
44 static TCGv_i32 cpu_msr;
45 static TCGv_i32 cpu_msr_c;
46 static TCGv_i32 cpu_imm;
47 static TCGv_i32 cpu_bvalue;
48 static TCGv_i32 cpu_btarget;
49 static TCGv_i32 cpu_iflags;
50 static TCGv cpu_res_addr;
51 static TCGv_i32 cpu_res_val;
52 
53 #include "exec/gen-icount.h"
54 
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    MicroBlazeCPU *cpu;

    /* TCG op of the current insn_start.  */
    TCGOp *insn_start;

    /* Lazily-allocated temp standing in for the always-zero r0.  */
    TCGv_i32 r0;
    bool r0_set;             /* r0 currently holds the value 0 */

    /* Decoder.  */
    uint32_t ext_imm;        /* high 16 bits latched by an IMM prefix */
    unsigned int cpustate_changed;
    unsigned int tb_flags;   /* translation-time copy of iflags */
    unsigned int tb_flags_to_set;   /* flags for the *next* insn (dslot/imm) */
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
79 
80 static int typeb_imm(DisasContext *dc, int x)
81 {
82     if (dc->tb_flags & IMM_FLAG) {
83         return deposit32(dc->ext_imm, 0, 16, x);
84     }
85     return x;
86 }
87 
88 /* Include the auto-generated decoder.  */
89 #include "decode-insns.c.inc"
90 
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    /* Only the non-MSR bits live in iflags; write back only when they
       differ from the value the TB started with.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & ~MSR_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & ~MSR_TB_MASK);
    }
}
98 
99 static void gen_raise_exception(DisasContext *dc, uint32_t index)
100 {
101     TCGv_i32 tmp = tcg_const_i32(index);
102 
103     gen_helper_raise_exception(cpu_env, tmp);
104     tcg_temp_free_i32(tmp);
105     dc->base.is_jmp = DISAS_NORETURN;
106 }
107 
/* As gen_raise_exception, but first make iflags and pc visible to the
   exception handler (flags must be synced before pc is stored).  */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
114 
115 static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
116 {
117     TCGv_i32 tmp = tcg_const_i32(esr_ec);
118     tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
119     tcg_temp_free_i32(tmp);
120 
121     gen_raise_exception_sync(dc, EXCP_HW_EXCP);
122 }
123 
/* Direct TB chaining is only safe when the destination stays on the
   same guest page as the TB start (sysemu); user mode has no remap
   hazard, so it is always allowed there.  */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
132 
/* End the TB with a jump to DEST, chaining TBs when permitted.
   Under single-step, raise EXCP_DEBUG at the destination instead.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        /* Chain to slot n of this TB.  */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        /* Cross-page: fall back to an unchained exit.  */
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
150 
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* Trap only when MSR[EE] is set and the core is configured to
       raise illegal-opcode exceptions; either way COND tells the
       caller whether to treat the insn as illegal.  */
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
163 
164 /*
165  * Returns true if the insn is illegal in userspace.
166  * If exceptions are enabled, an exception is raised.
167  */
168 static bool trap_userspace(DisasContext *dc, bool cond)
169 {
170     bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
171 
172     if (cond_user && (dc->tb_flags & MSR_EE)) {
173         gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
174     }
175     return cond_user;
176 }
177 
/* Return a TCGv_i32 holding register REG for reading.  r0 always reads
   as zero; a lazily-allocated temp is initialized to 0 on first use.  */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
192 
193 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
194 {
195     if (likely(reg != 0)) {
196         return cpu_R[reg];
197     }
198     if (dc->r0 == NULL) {
199         dc->r0 = tcg_temp_new_i32();
200     }
201     return dc->r0;
202 }
203 
204 static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
205                      void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
206 {
207     TCGv_i32 rd, ra, rb;
208 
209     if (arg->rd == 0 && !side_effects) {
210         return true;
211     }
212 
213     rd = reg_for_write(dc, arg->rd);
214     ra = reg_for_read(dc, arg->ra);
215     rb = reg_for_read(dc, arg->rb);
216     fn(rd, ra, rb);
217     return true;
218 }
219 
220 static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
221                       void (*fn)(TCGv_i32, TCGv_i32))
222 {
223     TCGv_i32 rd, ra;
224 
225     if (arg->rd == 0 && !side_effects) {
226         return true;
227     }
228 
229     rd = reg_for_write(dc, arg->rd);
230     ra = reg_for_read(dc, arg->ra);
231     fn(rd, ra);
232     return true;
233 }
234 
235 static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
236                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
237 {
238     TCGv_i32 rd, ra;
239 
240     if (arg->rd == 0 && !side_effects) {
241         return true;
242     }
243 
244     rd = reg_for_write(dc, arg->rd);
245     ra = reg_for_read(dc, arg->ra);
246     fni(rd, ra, arg->imm);
247     return true;
248 }
249 
250 static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
251                          void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
252 {
253     TCGv_i32 rd, ra, imm;
254 
255     if (arg->rd == 0 && !side_effects) {
256         return true;
257     }
258 
259     rd = reg_for_write(dc, arg->rd);
260     ra = reg_for_read(dc, arg->ra);
261     imm = tcg_const_i32(arg->imm);
262 
263     fn(rd, ra, imm);
264 
265     tcg_temp_free_i32(imm);
266     return true;
267 }
268 
/*
 * Declare trans_* entry points for the decodetree decoder.  SE ("side
 * effects") must be true for insns that do more than write rd (e.g.
 * update carry), so they are not elided when rd == r0.  The _CFG
 * variants additionally gate the insn on a CPU configuration option;
 * when the option is off, returning false decodes the insn as illegal.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap helper calls that additionally take cpu_env.  */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
304 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* add2 yields the 64-bit sum; the high word is the carry-out.  */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}
314 
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Two add2 steps: fold the incoming carry into ina, then add inb;
       cpu_msr_c accumulates the carry-out of both additions.  */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
327 
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
334 
/* Carry-writing variants pass SE=true so they run even when rd==r0.  */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
344 
/* andni: AND with the complement of the immediate.  */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}
349 
/* Bitwise logic; no flags touched, so SE=false throughout.  */
DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
354 
355 static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
356 {
357     TCGv_i32 tmp = tcg_temp_new_i32();
358     tcg_gen_andi_i32(tmp, inb, 31);
359     tcg_gen_sar_i32(out, ina, tmp);
360     tcg_temp_free_i32(tmp);
361 }
362 
363 static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
364 {
365     TCGv_i32 tmp = tcg_temp_new_i32();
366     tcg_gen_andi_i32(tmp, inb, 31);
367     tcg_gen_shr_i32(out, ina, tmp);
368     tcg_temp_free_i32(tmp);
369 }
370 
371 static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
372 {
373     TCGv_i32 tmp = tcg_temp_new_i32();
374     tcg_gen_andi_i32(tmp, inb, 31);
375     tcg_gen_shl_i32(out, ina, tmp);
376     tcg_temp_free_i32(tmp);
377 }
378 
/* Bit-field extract: out = field of width imm_w starting at bit imm_s.  */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        /* out is deliberately left unchanged.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
393 
/* Bit-field insert: deposit ina into out at [imm_s, imm_w] inclusive.  */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        /* out is deliberately left unchanged.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
409 
/* All barrel-shift insns require the use_barrel configuration option.  */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
420 
/* Count leading zeros; clz of 0 is defined to return 32.  */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
427 
428 static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
429 {
430     TCGv_i32 lt = tcg_temp_new_i32();
431 
432     tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
433     tcg_gen_sub_i32(out, inb, ina);
434     tcg_gen_deposit_i32(out, out, lt, 31, 1);
435     tcg_temp_free_i32(lt);
436 }
437 
438 static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
439 {
440     TCGv_i32 lt = tcg_temp_new_i32();
441 
442     tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
443     tcg_gen_sub_i32(out, inb, ina);
444     tcg_gen_deposit_i32(out, out, lt, 31, 1);
445     tcg_temp_free_i32(lt);
446 }
447 
/* cmp/cmpu only write rd; no flags, so SE=false.  */
DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
450 
/* FPU ops go through helpers (they may raise FP exceptions via env),
   hence SE=true and the use_fpu configuration gate.  */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)
474 
/* Conversions and sqrt require the extended FPU (use_fpu >= 2).  */
ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
482 
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv computes rb / ra, so the helper gets (inb, ina).  */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
496 
/* IMM prefix: latch the high 16 bits for the following Type-B insn,
   both at translation time (ext_imm) and at runtime (cpu_imm), and
   mark IMM_FLAG to be set while translating the next insn.  */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
504 
505 static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
506 {
507     TCGv_i32 tmp = tcg_temp_new_i32();
508     tcg_gen_muls2_i32(tmp, out, ina, inb);
509     tcg_temp_free_i32(tmp);
510 }
511 
512 static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
513 {
514     TCGv_i32 tmp = tcg_temp_new_i32();
515     tcg_gen_mulu2_i32(tmp, out, ina, inb);
516     tcg_temp_free_i32(tmp);
517 }
518 
519 static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
520 {
521     TCGv_i32 tmp = tcg_temp_new_i32();
522     tcg_gen_mulsu2_i32(tmp, out, ina, inb);
523     tcg_temp_free_i32(tmp);
524 }
525 
/* High-half multiplies need the full multiplier (use_hw_mul >= 2).  */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)
534 
/* Pattern compare: pcmpeq/pcmpne produce a 0/1 result; pcmpbf (byte
   find) goes through a helper.  */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
548 
/* No input carry, but output carry. */
/* rsub computes inb - ina; carry-out is "no borrow", i.e. inb >=u ina. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
555 
/* Input and output carry. */
/* out = inb + ~ina + carry-in, with carry-out accumulated in MSR[C]. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}
569 
/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}
575 
576 /* Input carry, no output carry. */
577 static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
578 {
579     TCGv_i32 nota = tcg_temp_new_i32();
580 
581     tcg_gen_not_i32(nota, ina);
582     tcg_gen_add_i32(out, inb, nota);
583     tcg_gen_add_i32(out, out, cpu_msr_c);
584 
585     tcg_temp_free_i32(nota);
586 }
587 
/* Carry-writing rsub variants pass SE=true; sign extensions are pure. */
DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
600 
/* Shift right arithmetic by one; bit 0 moves into MSR[C].  */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}
606 
/* Shift right through carry: old MSR[C] enters at bit 31 while bit 0
   becomes the new MSR[C].  The old carry is copied first since the
   andi clobbers cpu_msr_c.  */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}
617 
/* Shift right logical by one; bit 0 moves into MSR[C].  */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

/* All three update carry, hence... SE=false?  No: carry writes happen
   via cpu_msr_c inside the generators; rd==r0 still needs them run.
   NOTE(review): these pass SE=false, so the carry update is skipped
   when rd==r0 — confirm against hardware behavior.  */
DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
627 
/* Swap the two 16-bit halves (rotate by 16).  */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)
635 
static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}
642 
/* Exclusive-or; pure, no flags.  */
DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
645 
646 static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
647 {
648     TCGv ret = tcg_temp_new();
649 
650     /* If any of the regs is r0, set t to the value of the other reg.  */
651     if (ra && rb) {
652         TCGv_i32 tmp = tcg_temp_new_i32();
653         tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
654         tcg_gen_extu_i32_tl(ret, tmp);
655         tcg_temp_free_i32(tmp);
656     } else if (ra) {
657         tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
658     } else if (rb) {
659         tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
660     } else {
661         tcg_gen_movi_tl(ret, 0);
662     }
663 
664     if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
665         gen_helper_stackprot(cpu_env, ret);
666     }
667     return ret;
668 }
669 
670 static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
671 {
672     TCGv ret = tcg_temp_new();
673 
674     /* If any of the regs is r0, set t to the value of the other reg.  */
675     if (ra) {
676         TCGv_i32 tmp = tcg_temp_new_i32();
677         tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
678         tcg_gen_extu_i32_tl(ret, tmp);
679         tcg_temp_free_i32(tmp);
680     } else {
681         tcg_gen_movi_tl(ret, (uint32_t)imm);
682     }
683 
684     if (ra == 1 && dc->cpu->cfg.stackprot) {
685         gen_helper_stackprot(cpu_env, ret);
686     }
687     return ret;
688 }
689 
#ifndef CONFIG_USER_ONLY
/* Compute the extended address {ra, rb} (ra supplies the high word)
   for the privileged *ea insns, truncated to cfg.addr_size bits.  */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cpu->cfg.addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        /* 32-bit addressing, or high word is r0 (zero).  */
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            /* Low word is r0: just the high word shifted up.  */
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
717 
/* Fold the ESR "exception specific status" for a potentially
   unaligned access into insn_start parameter 1 (the iflags slot),
   so the unaligned-access handler can recover rd/size/direction.  */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
730 
/* Emit a load of MOP width into rd from ADDR (which is consumed).
   REV selects the byte-reversed access variants.  */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* When the core traps unaligned accesses, request alignment
       checking and record the ESR details for the handler.  */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
763 
/*
 * Byte loads.  "r" variants byte-reverse, "ea" variants use the
 * privileged 64-bit extended address with the no-MMU regime, and
 * "i" variants take a Type-B immediate offset.
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
794 
/* Halfword loads; same variants as the byte loads above.  */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
825 
/* Word loads; same variants as the byte loads above.  */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
856 
/* Load-word-exclusive: record the reservation address and value for
   the matching swx.  */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
876 
/* Emit a store of MOP width from rd to ADDR (which is consumed).
   REV selects the byte-reversed access variants.  */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* When the core traps unaligned accesses, request alignment
       checking and record the ESR details for the handler.  */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
909 
/* Byte stores; variants mirror the byte loads.  */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
940 
/* Halfword stores; variants mirror the byte loads.  */
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
971 
/* Word stores; variants mirror the byte loads.  */
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
1002 
/* Store-word-exclusive: succeeds (MSR[C]=0) only if the reservation
   from lwx is still intact; the reservation is consumed either way.  */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    /* cmpxchg: store rd only if the memory still holds cpu_res_val.  */
    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1052 
1053 static void setup_dslot(DisasContext *dc, bool type_b)
1054 {
1055     dc->tb_flags_to_set |= D_FLAG;
1056     if (type_b && (dc->tb_flags & IMM_FLAG)) {
1057         dc->tb_flags_to_set |= BIMM_FLAG;
1058     }
1059 }
1060 
/*
 * Common helper for the unconditional branch family (br/bra/brd/...).
 *
 * dest_rb > 0  : register-indirect destination (r[dest_rb]);
 * dest_rb <= 0 : immediate destination dest_imm.  Note rb == 0 (r0,
 *                always zero) is folded into the immediate path, where
 *                dest_imm is 0 for the type-a forms.
 * abs          : absolute target; otherwise relative to this insn's PC.
 * link != 0    : write the address of this insn into r[link].
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Target unknown at translation time: force indirect jump. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1086 
/*
 * Expand the register (type-a) and immediate (type-b) forms of each
 * unconditional branch, parameterized by delay slot, absolute
 * addressing, and linkage -- all forwarded to do_branch.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

/*           reg    imm    delay  abs    link */
DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1099 
/*
 * Common helper for the conditional branches (beq/bne/bge/...), which
 * compare r[ra] against zero with COND.  The taken destination is
 * computed into cpu_btarget; a movcond then folds in the not-taken
 * fall-through address so that btarget always holds the final
 * destination by the end of the (possible) delay slot.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        /* Register destination: unknown statically, force indirect. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_const_i32(0);
    /* Not-taken target: skip this insn plus the delay slot, if any. */
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
1134 
/*
 * Expand the four forms of each conditional branch: register/immediate
 * destination, with and without delay slot ('d' suffix).
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1151 
/*
 * brk: privileged break.  Jumps to r[rb], optionally links the return
 * address into r[rd], sets MSR[BIP], and invalidates any lwx
 * reservation.  Ends the TB since MSR changed.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Any outstanding lwx reservation is lost. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_UPDATE;
    return true;
}
1167 
/*
 * brki: break immediate.  Vectors 0x8 (user vector / syscall) and 0x18
 * (debug) are available from userspace; all others are privileged.
 * For user-only emulation these two raise the matching QEMU exception;
 * for system emulation MSR is adjusted (BIP, and saved UM/VM copies).
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Any outstanding lwx reservation is lost. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        /* All vectors except the debug trap set break-in-progress. */
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        /* Copy current UM/VM into the saved UMS/VMS bits (one bit up). */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_UPDATE;
#endif

    return true;
}
1210 
/*
 * mbar: memory barrier.  Immediate bit 1 clear => data barrier;
 * bit 4 set => privileged sleep (halt until interrupt).  The TB is
 * always ended so barriers and wakeup conditions take effect.
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted = 1; env is embedded inside MicroBlazeCPU,
           hence the negative offset back to the containing CPUState. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume after the mbar once woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->cpustate_changed = 1;
    return true;
}
1256 
/*
 * Common helper for the return instructions (rtsd/rtid/rtbd/rted).
 * to_set is the DRT?_FLAG describing which MSR restore to perform
 * after the delay slot (0 for plain rtsd); the non-zero flags are
 * privileged.  The target is r[ra] + imm, always indirect.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1270 
/* Expand one trans_* wrapper per return flavor; IFLAG selects the
   post-delay-slot MSR restore (see do_rti/do_rtb/do_rte below). */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1279 
1280 static bool trans_zero(DisasContext *dc, arg_zero *arg)
1281 {
1282     /* If opcode_0_illegal, trap.  */
1283     if (dc->cpu->cfg.opcode_0_illegal) {
1284         trap_illegal(dc, true);
1285         return true;
1286     }
1287     /*
1288      * Otherwise, this is "add r0, r0, r0".
1289      * Continue to trans_add so that MSR[C] gets cleared.
1290      */
1291     return false;
1292 }
1293 
1294 static void msr_read(DisasContext *dc, TCGv_i32 d)
1295 {
1296     TCGv_i32 t;
1297 
1298     /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1299     t = tcg_temp_new_i32();
1300     tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1301     tcg_gen_or_i32(d, cpu_msr, t);
1302     tcg_temp_free_i32(t);
1303 }
1304 
#ifndef CONFIG_USER_ONLY
/*
 * Write v to the architectural MSR: split the carry back out into
 * cpu_msr_c and store the remaining bits in cpu_msr.  Forces a TB end
 * because translation-relevant mode bits may have changed.
 */
static void msr_write(DisasContext *dc, TCGv_i32 v)
{
    dc->cpustate_changed = 1;

    /* Install MSR_C.  */
    tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);

    /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
    tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
}
#endif
1317 
/*
 * Common helper for msrclr/msrset: read the old MSR into r[rd] (if
 * rd != 0), then clear or set (per 'set') the bits named by imm.
 * Userspace may only touch MSR_C; anything wider traps.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        /* rd receives the pre-modification MSR value. */
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* Mode bits may have changed; end the TB. */
        dc->cpustate_changed = 1;
    }
    return true;
}
1354 
/* msrclr: clear the MSR bits selected by imm (old MSR -> rd). */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1359 
/* msrset: set the MSR bits selected by imm (old MSR -> rd). */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1364 
1365 static bool trans_mts(DisasContext *dc, arg_mts *arg)
1366 {
1367     if (trap_userspace(dc, true)) {
1368         return true;
1369     }
1370 
1371 #ifdef CONFIG_USER_ONLY
1372     g_assert_not_reached();
1373 #else
1374     if (arg->e && arg->rs != 0x1003) {
1375         qemu_log_mask(LOG_GUEST_ERROR,
1376                       "Invalid extended mts reg 0x%x\n", arg->rs);
1377         return true;
1378     }
1379 
1380     TCGv_i32 src = reg_for_read(dc, arg->ra);
1381     switch (arg->rs) {
1382     case SR_MSR:
1383         msr_write(dc, src);
1384         break;
1385     case SR_FSR:
1386         tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
1387         break;
1388     case 0x800:
1389         tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
1390         break;
1391     case 0x802:
1392         tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
1393         break;
1394 
1395     case 0x1000: /* PID */
1396     case 0x1001: /* ZPR */
1397     case 0x1002: /* TLBX */
1398     case 0x1003: /* TLBLO */
1399     case 0x1004: /* TLBHI */
1400     case 0x1005: /* TLBSX */
1401         {
1402             TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
1403             TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);
1404 
1405             gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
1406             tcg_temp_free_i32(tmp_reg);
1407             tcg_temp_free_i32(tmp_ext);
1408         }
1409         break;
1410 
1411     default:
1412         qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
1413         return true;
1414     }
1415     dc->cpustate_changed = 1;
1416     return true;
1417 #endif
1418 }
1419 
/*
 * mfs: move special register rs into r[rd].  The extended ('e') form
 * selects the high half of 64-bit EAR, the (system-only) TLBLO access,
 * or the unimplemented high halves of PVR6-9; the plain form covers
 * PC, MSR, the exception/status registers, SLR/SHR, the MMU registers
 * (system-only) and the PVR array.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended read: high 32 bits of the 64-bit EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        /* PC is not kept up to date during translation; use the
           compile-time address of this instruction. */
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Plain read: low 32 bits of the 64-bit EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* Processor Version Registers pvr0..pvr12. */
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(CPUMBState, pvr.regs[arg->rs - 0x2000]));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1512 
/*
 * Complete rtid after its delay slot: re-enable interrupts (MSR_IE)
 * and restore UM/VM from their saved UMS/VMS copies (one bit above,
 * hence the shift right by 1).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
    dc->tb_flags &= ~DRTI_FLAG;
}
1526 
/*
 * Complete rtbd after its delay slot: clear break-in-progress
 * (MSR_BIP) and restore UM/VM from the saved UMS/VMS copies.
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
    dc->tb_flags &= ~DRTB_FLAG;
}
1539 
/*
 * Complete rted after its delay slot: re-enable exceptions (MSR_EE),
 * clear exception-in-progress (MSR_EIP), and restore UM/VM from the
 * saved UMS/VMS copies.
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
    dc->tb_flags &= ~DRTE_FLAG;
}
1553 
1554 /* Insns connected to FSL or AXI stream attached devices.  */
1555 static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
1556 {
1557     TCGv_i32 t_id, t_ctrl;
1558 
1559     if (trap_userspace(dc, true)) {
1560         return true;
1561     }
1562 
1563     t_id = tcg_temp_new_i32();
1564     if (rb) {
1565         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
1566     } else {
1567         tcg_gen_movi_i32(t_id, imm);
1568     }
1569 
1570     t_ctrl = tcg_const_i32(ctrl);
1571     gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
1572     tcg_temp_free_i32(t_id);
1573     tcg_temp_free_i32(t_ctrl);
1574     return true;
1575 }
1576 
/* get: stream read with port id taken from the immediate. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1581 
/* getd: stream read with port id taken from r[rb]. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1586 
/*
 * put/putd: write r[ra] to a stream interface.  Privileged.  Port id
 * selection mirrors do_get above.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1608 
/* put: stream write with port id taken from the immediate. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1613 
/* putd: stream write with port id taken from r[rb]. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1618 
/*
 * TranslatorOps hook: initialize per-TB translation state from the TB's
 * flags/cs_base, and clamp max_insns so the TB never crosses a page.
 */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cpu = cpu;
    dc->tb_flags = dc->base.tb->flags;
    dc->cpustate_changed = 0;
    /* cs_base carries any imm prefix value live at TB entry. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* If we enter the TB inside a delay slot, the branch is pending. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Number of 4-byte insns remaining until the end of this page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1638 
/* TranslatorOps hook: no per-TB prologue needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1642 
/*
 * TranslatorOps hook: record (pc, iflags) for this insn so that
 * restore_state_to_opc can recover them after an exception.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    /* Remember the op so ESR_ESS bits can be patched in later. */
    dc->insn_start = tcg_last_op();
}
1650 
/*
 * TranslatorOps hook: a guest breakpoint at this pc raises EXCP_DEBUG
 * and ends translation.
 */
static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order to for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}
1667 
/*
 * TranslatorOps hook: fetch, decode and translate one instruction,
 * then resolve delay-slot bookkeeping (D/IMM/BIMM flags, pending
 * return-MSR restores) and decide whether the TB must end.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Drop the lazily-allocated r0 temporary, if this insn made one. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* The per-insn flags last exactly one instruction. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    /* A branch completes once its delay slot (if any) has executed. */
    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /* Perform any MSR restore requested by rtid/rtbd/rted. */
        if (dc->tb_flags & DRTI_FLAG) {
            do_rti(dc);
        } else if (dc->tb_flags & DRTB_FLAG) {
            do_rtb(dc);
        } else if (dc->tb_flags & DRTE_FLAG) {
            do_rte(dc);
        }
        dc->base.is_jmp = DISAS_JUMP;
    }

    /* Force an exit if the per-tb cpu state has changed.  */
    if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    }
}
1719 
/*
 * TranslatorOps hook: emit the TB epilogue according to how
 * translation ended -- chained goto_tb for fall-through and direct
 * jumps, or an indirect exit through cpu_btarget, with single-step
 * forcing EXCP_DEBUG instead of TB chaining.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Flush any pending iflags state to env before leaving. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_UPDATE:
        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ singlestep) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    default:
        g_assert_not_reached();
    }
}
1785 
/* TranslatorOps hook: log the guest disassembly of this TB. */
static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}
1791 
/* Callback table consumed by the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .breakpoint_check   = mb_tr_breakpoint_check,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1801 
/* Entry point from the core: translate one TB via the generic loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
1807 
/*
 * Dump CPU state for the monitor/-d cpu logging: pc/msr/mode summary,
 * decoded iflags, exception registers, PVR array and the 32 GPRs.
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode iflags into the symbolic per-insn translation state. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* PVR registers, four per line. */
    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%-2d=%08x%c",
                     i, env->pvr.regs[i], i % 4 == 3 ? '\n' : ' ');
    }

    /* General-purpose registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1862 
/*
 * Create the TCG globals backing CPUMBState fields: r1-r31, the pc/msr
 * and branch-state registers, and the lwx/swx reservation pair.
 */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence the separate allocation. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1903 
/*
 * Rebuild env state after a mid-TB exception; data[] holds the values
 * recorded by tcg_gen_insn_start in mb_tr_insn_start (pc, iflags).
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}
1910