xref: /openbmc/qemu/target/microblaze/translate.c (revision fe766734)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "qemu/qemu-print.h"
31 
32 #include "exec/log.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
/*
 * Extract the inclusive bit-field [start, end] from SRC.
 * All arguments are fully parenthesized in the expansion so that
 * expression arguments (e.g. "2 - 1") cannot mis-bind against the
 * shift and arithmetic operators inside the macro.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
40 
41 /* is_jmp field values */
42 #define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
43 #define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */
44 
45 /* cpu state besides pc was modified dynamically; update pc to next */
46 #define DISAS_EXIT_NEXT DISAS_TARGET_2
47 /* cpu state besides pc was modified dynamically; update pc to btarget */
48 #define DISAS_EXIT_JUMP DISAS_TARGET_3
49 
/* TCG global views of the architectural CPU state. */
static TCGv_i32 cpu_R[32];      /* general-purpose registers; r0 handled specially */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;      /* MSR carry bit, kept separate from cpu_msr */
static TCGv_i32 cpu_imm;        /* value latched by the IMM prefix insn */
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;    /* branch target used when resolving delay slots */
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;       /* lwx/swx reservation address (-1 when invalid) */
static TCGv_i32 cpu_res_val;    /* value loaded by lwx, compared by swx */
60 
61 /* This is the state at translation time.  */
62 typedef struct DisasContext {
63     DisasContextBase base;
64     const MicroBlazeCPUConfig *cfg;
65 
66     TCGv_i32 r0;
67     bool r0_set;
68 
69     /* Decoder.  */
70     uint32_t ext_imm;
71     unsigned int tb_flags;
72     unsigned int tb_flags_to_set;
73     int mem_index;
74 
75     /* Condition under which to jump, including NEVER and ALWAYS. */
76     TCGCond jmp_cond;
77 
78     /* Immediate branch-taken destination, or -1 for indirect. */
79     uint32_t jmp_dest;
80 } DisasContext;
81 
82 static int typeb_imm(DisasContext *dc, int x)
83 {
84     if (dc->tb_flags & IMM_FLAG) {
85         return deposit32(dc->ext_imm, 0, 16, x);
86     }
87     return x;
88 }
89 
90 /* Include the auto-generated decoder.  */
91 #include "decode-insns.c.inc"
92 
/*
 * Write the translation-time iflags back to the cpu state, but only
 * when they differ from the value this TB started with (otherwise
 * cpu_iflags is already correct at runtime).
 */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
100 
/* Raise exception INDEX; the helper longjmps out, so the TB ends here. */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

/* As gen_raise_exception, but first sync iflags and pc into the cpu state. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

/* Raise a hardware exception with exception cause ESR_EC stored in ESR. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
121 
/*
 * Emit a jump to DEST: chain directly to the next TB when permitted
 * (translator_use_goto_tb), otherwise update pc and go through the
 * TB lookup path.  Ends the current TB either way.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
134 
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
139 static bool trap_illegal(DisasContext *dc, bool cond)
140 {
141     if (cond && (dc->tb_flags & MSR_EE)
142         && dc->cfg->illegal_opcode_exception) {
143         gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
144     }
145     return cond;
146 }
147 
148 /*
149  * Returns true if the insn is illegal in userspace.
150  * If exceptions are enabled, an exception is raised.
151  */
152 static bool trap_userspace(DisasContext *dc, bool cond)
153 {
154     bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
155 
156     if (cond_user && (dc->tb_flags & MSR_EE)) {
157         gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
158     }
159     return cond_user;
160 }
161 
162 /*
163  * Return true, and log an error, if the current insn is
164  * within a delay slot.
165  */
166 static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
167 {
168     if (dc->tb_flags & D_FLAG) {
169         qemu_log_mask(LOG_GUEST_ERROR,
170                       "Invalid insn in delay slot: %s at %08x\n",
171                       insn_type, (uint32_t)dc->base.pc_next);
172         return true;
173     }
174     return false;
175 }
176 
177 static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
178 {
179     if (likely(reg != 0)) {
180         return cpu_R[reg];
181     }
182     if (!dc->r0_set) {
183         if (dc->r0 == NULL) {
184             dc->r0 = tcg_temp_new_i32();
185         }
186         tcg_gen_movi_i32(dc->r0, 0);
187         dc->r0_set = true;
188     }
189     return dc->r0;
190 }
191 
192 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
193 {
194     if (likely(reg != 0)) {
195         return cpu_R[reg];
196     }
197     if (dc->r0 == NULL) {
198         dc->r0 = tcg_temp_new_i32();
199     }
200     return dc->r0;
201 }
202 
/*
 * Emit a Type-A (register-register) insn via FN(rd, ra, rb).
 * When SIDE_EFFECTS is false and the destination is r0 the insn is a nop.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

/* As do_typea, for one-source insns: FN(rd, ra). */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

/* Type-B insn whose generator takes the immediate as a host constant. */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

/* Type-B insn whose generator needs the immediate as a TCG value. */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}
265 
/*
 * Generators for the trans_* callbacks expected by the decodetree
 * decoder.  SE is "side effects": pass true when the insn must execute
 * even if rd is r0 (e.g. because it writes MSR[C] or may trap).
 * The _CFG variants additionally gate the insn on a cpu configuration
 * field; returning false there makes the insn decode as illegal.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that takes tcg_env as its second argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
301 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    /* (cpu_msr_c:out) = ina + inb, so the carry lands in MSR[C]. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Two-step add so carry-out accumulates from both additions:
       tmp = ina + carry-in, then out = tmp + inb. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
326 
/* SE=true for the variants that write MSR[C]. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

/* and-not-immediate: AND with the bitwise complement of IMM. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
346 
/* Barrel shifts: only the low 5 bits of the shift amount are used. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}
367 
/* Bit-field extract: out = ina[imm_s + imm_w - 1 : imm_s], zero-extended. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-field insert: out[imm_w : imm_s] = ina[imm_w - imm_s : 0]. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    /* Here imm_w is the *end* bit, so the field is imm_w - imm_s + 1 wide. */
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
398 
/* All barrel-shift insns require the use_barrel configuration bit. */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

/* Count leading zeros; a zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
416 
/*
 * cmp/cmpu: out = inb - ina, with bit 31 replaced by the result of the
 * signed (cmp) or unsigned (cmpu) comparison inb < ina.
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
437 
/* FPU operations go through helpers (they update FSR / may raise traps),
   so SE=true: they must run even when rd is r0. */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversions and sqrt require the extended FPU (use_fpu >= 2). */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
469 
/* Does not use ENV_WRAPPER3, because arguments are swapped as well:
   idiv computes rb / ra, and the helpers handle divide-by-zero. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
483 
/*
 * IMM prefix: latch the upper 16 bits for the next Type-B insn, both at
 * translation time (dc->ext_imm) and at runtime (cpu_imm, needed if an
 * interrupt splits the pair).  IMM_FLAG is set for the following insn.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
494 
/* High half of the signed 64-bit product; the low half (tmp) is discarded. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

/* High half of the unsigned 64-bit product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

/* High half of the signed (ina) x unsigned (inb) product. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

/* High-half multiplies require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
518 
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq/pcmpne: out = 1 or 0 according to whole-word (in)equality. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
535 
/* Reverse subtract: all variants compute inb - ina. */

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* MSR[C] = no-borrow flag, i.e. inb >= ina (unsigned). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !C  ==  inb + ~ina + C, same two-step carry chain
       as gen_addc with ~ina in place of ina. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    /* out = inb + ~ina + C */
    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
582 
/* Shift right arithmetic by one; the bit shifted out goes to MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right through carry: the old MSR[C] enters bit 31,
   the bit shifted out becomes the new MSR[C]. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the old carry before clobbering it. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    /* out = low bit of (tmp:ina) >> 1, i.e. (tmp << 31) | (ina >> 1). */
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

/* Shift right logical by one; the bit shifted out goes to MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap the two 16-bit halves (a rotate by 16 in either direction). */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)
615 
/* wdc/wic cache-control insns. */
static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
625 
/*
 * Compute the effective address ra + rb for a Type-A load/store,
 * zero-extended to target_ulong.  Returns a fresh temp.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Stack-protection check for accesses based on r1 (the stack pointer). */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
648 
649 static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
650 {
651     TCGv ret = tcg_temp_new();
652 
653     /* If any of the regs is r0, set t to the value of the other reg.  */
654     if (ra) {
655         TCGv_i32 tmp = tcg_temp_new_i32();
656         tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
657         tcg_gen_extu_i32_tl(ret, tmp);
658     } else {
659         tcg_gen_movi_tl(ret, (uint32_t)imm);
660     }
661 
662     if (ra == 1 && dc->cfg->stackprot) {
663         gen_helper_stackprot(tcg_env, ret);
664     }
665     return ret;
666 }
667 
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended (up to 64-bit physical) address rb:ra for the
 * *ea load/store insns.  With a 32-bit address space, or ra == 0, only
 * rb contributes; otherwise ra supplies the high 32 bits.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
695 
#ifndef CONFIG_USER_ONLY
/*
 * Record the exception-specific state (dest reg, store flag, word-size
 * flag) in the insn-start parameters, so that an unaligned-access fault
 * taken on this insn can reconstruct ESR from them.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif
710 
/*
 * Common code for all loads: handle reversed (byte-swapped) accesses,
 * optionally enforce alignment, and emit the load into rd.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
748 
/*
 * Load insns by size: lbu* (byte), lhu* (half), lw* (word).
 * Plain = register-pair address, r = byte-reversed, i = immediate
 * offset, ea = extended address (privileged, bypasses the MMU).
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    /* In user emulation the privileged ea form cannot execute. */
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
841 
/*
 * Load-and-reserve: record the (aligned) address and loaded value in
 * cpu_res_addr/cpu_res_val for a later swx to validate against.
 */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
860 
/*
 * Common code for all stores; mirrors do_load for the reversed and
 * alignment-checked cases, reading the data from rd.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
898 
/*
 * Store insns by size: sb* (byte), sh* (half), sw* (word); same naming
 * scheme as the loads (r = reversed, i = immediate, ea = extended).
 */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    /* In user emulation the privileged ea form cannot execute. */
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
991 
/*
 * Store-conditional: succeeds (MSR[C] = 0) only if the address matches
 * the reservation from lwx and the memory still holds the value loaded
 * then; the compare-and-store itself is a single atomic cmpxchg.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1039 
1040 static void setup_dslot(DisasContext *dc, bool type_b)
1041 {
1042     dc->tb_flags_to_set |= D_FLAG;
1043     if (type_b && (dc->tb_flags & IMM_FLAG)) {
1044         dc->tb_flags_to_set |= BIMM_FLAG;
1045     }
1046 }
1047 
/*
 * Common code for the unconditional branch family.
 *
 *  dest_rb:  target register number, or < 0 for the immediate form
 *  dest_imm: immediate target/offset (immediate form only)
 *  delay:    branch executes one delay-slot insn
 *  abs:      target is absolute rather than pc-relative
 *  link:     register in which to save this insn's pc (0 = no link)
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Register target: destination is only known at run time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /* Immediate target (rb == 0, i.e. r0 == 0, folds in here too). */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1076 
/*
 * Expand one register-form (type A) and one immediate-form (type B)
 * handler per unconditional branch variant; see do_branch for the
 * meaning of DELAY/ABS/LINK.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

/*    reg    imm    delay  abs    link */
DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1089 
/*
 * Common code for conditional branches: taken when "ra <cond> 0".
 * dest_rb < 0 selects the immediate form (dest_imm is the pc-relative
 * offset); otherwise dest_rb is the target register.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    /* Not-taken path resumes after the branch (and its delay slot, if any). */
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
1125 
/*
 * Expand the four variants of each conditional branch: register target,
 * register target with delay slot, immediate target, and immediate
 * target with delay slot.
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1142 
/*
 * brk: privileged break to a register-supplied address.  Sets MSR[BIP],
 * optionally links the return address into rd, and drops any lwx
 * reservation.  Ends the TB since MSR was modified.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Invalidate any outstanding lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1162 
/*
 * brki: break to an immediate vector.  Vectors 0x8 (syscall) and 0x18
 * (debug) are permitted from user mode; anything else is privileged.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Invalidate any outstanding lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        /* All vectors except the debug trap set MSR[BIP]. */
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        /* Save UM/VM into their shadow copies UMS/VMS, then clear them. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1209 
/*
 * mbar: memory barrier / sleep.  The immediate selects the behaviour:
 * bit 1 clear = data barrier, bit 4 set = privileged sleep (halt).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted, reached from the env pointer by stepping back
           to the containing MicroBlazeCPU and into its CPUState. */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        /* Resume at the insn following the mbar once woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1256 
/*
 * Common code for the return instructions (rtsd/rtid/rtbd/rted).
 * to_set carries the DRT?_FLAG telling the delay-slot epilogue which
 * MSR fixup (do_rti/do_rtb/do_rte) to apply; rtsd passes 0 and is
 * the only unprivileged variant (trap_userspace(dc, 0) never traps).
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    /* Return target is ra + imm, known only at run time. */
    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1274 
/* Expand one handler per return variant; IFLAG selects the MSR fixup. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1283 
1284 static bool trans_zero(DisasContext *dc, arg_zero *arg)
1285 {
1286     /* If opcode_0_illegal, trap.  */
1287     if (dc->cfg->opcode_0_illegal) {
1288         trap_illegal(dc, true);
1289         return true;
1290     }
1291     /*
1292      * Otherwise, this is "add r0, r0, r0".
1293      * Continue to trans_add so that MSR[C] gets cleared.
1294      */
1295     return false;
1296 }
1297 
1298 static void msr_read(DisasContext *dc, TCGv_i32 d)
1299 {
1300     TCGv_i32 t;
1301 
1302     /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1303     t = tcg_temp_new_i32();
1304     tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1305     tcg_gen_or_i32(d, cpu_msr, t);
1306 }
1307 
/*
 * Common code for msrclr/msrset: read the old MSR into rd (if rd != 0),
 * then clear or set the bits selected by the immediate.  Userspace may
 * only touch MSR[C].
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        /* rd receives the pre-modification MSR value. */
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* MSR changed: return to the main loop with pc advanced. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1344 
1345 static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
1346 {
1347     return do_msrclrset(dc, arg, false);
1348 }
1349 
1350 static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
1351 {
1352     return do_msrclrset(dc, arg, true);
1353 }
1354 
/*
 * mts: move ra to a special-purpose register.  Privileged.  Ends the
 * TB on success, since special registers can affect translation state.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* Unreachable: trap_userspace(true) always traps in user mode. */
    g_assert_not_reached();
#else
    /* The extended (e) bit is only valid together with TLBLO. */
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            /* MMU registers are written through a helper. */
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1413 
/*
 * mfs: move a special-purpose register into rd.  With the extended (e)
 * bit set, a 64-bit register's high half is selected where applicable
 * (currently only EAR).
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended read returns the high 32 bits of EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended read returns the low 32 bits of EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            /* MMU registers are read through a helper. */
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the CPU config, outside CPUMBState. */
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1503 
/*
 * rtid epilogue: re-enable interrupts (MSR[IE]) and restore MSR[VM|UM]
 * from the saved copies MSR[VMS|UMS] (one bit position higher).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1514 
/*
 * rtbd epilogue: clear MSR[BIP] and restore MSR[VM|UM] from the saved
 * copies MSR[VMS|UMS] (one bit position higher).
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1524 
/*
 * rted epilogue: re-enable exceptions (MSR[EE]), clear MSR[EIP], and
 * restore MSR[VM|UM] from the saved copies MSR[VMS|UMS].
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1535 
1536 /* Insns connected to FSL or AXI stream attached devices.  */
/* Insns connected to FSL or AXI stream attached devices.  */
/*
 * Common code for get/getd: read from a stream port into rd.
 * The port id comes from rb's low 4 bits (dynamic form) or from the
 * immediate.  Privileged.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}
1556 
1557 static bool trans_get(DisasContext *dc, arg_get *arg)
1558 {
1559     return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
1560 }
1561 
1562 static bool trans_getd(DisasContext *dc, arg_getd *arg)
1563 {
1564     return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
1565 }
1566 
/*
 * Common code for put/putd: write ra to a stream port.  The port id
 * comes from rb's low 4 bits (dynamic form) or from the immediate.
 * Privileged.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}
1586 
1587 static bool trans_put(DisasContext *dc, arg_put *arg)
1588 {
1589     return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
1590 }
1591 
1592 static bool trans_putd(DisasContext *dc, arg_putd *arg)
1593 {
1594     return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
1595 }
1596 
/* Per-TB translator setup: seed DisasContext from the TB and the CPU. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* Any pending imm-prefix value is carried in cs_base. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Cap the insn count so the TB cannot cross a page boundary. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1615 
/* No per-TB prologue is needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1619 
/* Record pc and (masked) iflags for this insn, for exception restore. */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}
1626 
/*
 * Translate one instruction, then perform any pending delay-slot
 * completion: apply the return-from-exception MSR fixups and convert
 * the is_jmp state so the TB ends at the branch target.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(cpu_env(cs), dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        /* Drop the scratch temp standing in for r0 after each insn. */
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1703 
/*
 * End-of-TB codegen: dispatch on is_jmp to emit the chained goto_tb,
 * indirect jump, or exit-to-main-loop epilogue.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: continue at the next insn. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already updated dynamically. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1773 
/* Disassemble the guest code covered by this TB into the debug log. */
static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}
1780 
/* Hooks consumed by the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1789 
/* Entry point: translate one TB by running the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
1796 
/* Print the architectural CPU state (pc, MSR, iflags, GPRs) to f. */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the internal translation flags one by one. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* General-purpose registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1845 
/* Allocate the TCG globals backing the CPUMBState fields used above. */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong-sized, hence the untyped allocator. */
    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1886