xref: /openbmc/qemu/target/microblaze/translate.c (revision d6fd5d83)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "qemu/qemu-print.h"
30 
31 #include "exec/log.h"
32 
33 #define HELPER_H "helper.h"
34 #include "exec/helper-info.c.inc"
35 #undef  HELPER_H
36 
/*
 * Extract the inclusive bit field [start, end] from src.
 * All three parameters are fully parenthesized so that expression
 * arguments (e.g. "1 + 1") expand correctly; the original form left
 * "start" and "end" bare, which mis-expands for non-trivial arguments.
 * Assumes a field narrower than 32 bits, else 1 << width is undefined.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3
48 
/* TCG globals backing the architectural cpu state. */
static TCGv_i32 cpu_R[32];      /* general registers; r0 is special-cased */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;      /* MSR[C] carry bit, tracked separately */
static TCGv_i32 cpu_imm;        /* upper 16 bits latched by the IMM insn */
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;     /* runtime copy of the IFLAGS_TB_MASK bits */
static TCGv cpu_res_addr;       /* lwx reservation address; -1 when invalid */
static TCGv_i32 cpu_res_val;    /* word value observed by the last lwx */
59 
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start.  */
    TCGOp *insn_start;

    /* Lazily-allocated temp standing in for the always-zero r0. */
    TCGv_i32 r0;
    /* True when r0 above currently holds the value zero. */
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;               /* IMM prefix value, already shifted << 16 */
    unsigned int tb_flags;          /* iflags as seen by the current insn */
    unsigned int tb_flags_to_set;   /* iflags staged for the following insn */
    int mem_index;                  /* MMU index for data accesses */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
83 
84 static int typeb_imm(DisasContext *dc, int x)
85 {
86     if (dc->tb_flags & IMM_FLAG) {
87         return deposit32(dc->ext_imm, 0, 16, x);
88     }
89     return x;
90 }
91 
92 /* Include the auto-generated decoder.  */
93 #include "decode-insns.c.inc"
94 
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    /*
     * Only emit the store when the translation-time flags diverge from
     * the value the TB was entered with; otherwise cpu_iflags already
     * holds the right value and the store would be redundant.
     */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
102 
/* Raise exception INDEX immediately, without updating pc or iflags. */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    /* The helper longjmps out; nothing after it in this TB can run. */
    dc->base.is_jmp = DISAS_NORETURN;
}

/* Raise exception INDEX after syncing iflags and pc to the current insn. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

/* Raise a hardware exception with exception status register code ESR_EC. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
123 
/*
 * End the TB with a jump to DEST, chaining through goto_tb slot N when
 * direct linking to DEST is permitted, else via a pc-based TB lookup.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
136 
137 /*
138  * Returns true if the insn an illegal operation.
139  * If exceptions are enabled, an exception is raised.
140  */
141 static bool trap_illegal(DisasContext *dc, bool cond)
142 {
143     if (cond && (dc->tb_flags & MSR_EE)
144         && dc->cfg->illegal_opcode_exception) {
145         gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
146     }
147     return cond;
148 }
149 
150 /*
151  * Returns true if the insn is illegal in userspace.
152  * If exceptions are enabled, an exception is raised.
153  */
154 static bool trap_userspace(DisasContext *dc, bool cond)
155 {
156     bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
157 
158     if (cond_user && (dc->tb_flags & MSR_EE)) {
159         gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
160     }
161     return cond_user;
162 }
163 
164 /*
165  * Return true, and log an error, if the current insn is
166  * within a delay slot.
167  */
168 static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
169 {
170     if (dc->tb_flags & D_FLAG) {
171         qemu_log_mask(LOG_GUEST_ERROR,
172                       "Invalid insn in delay slot: %s at %08x\n",
173                       insn_type, (uint32_t)dc->base.pc_next);
174         return true;
175     }
176     return false;
177 }
178 
179 static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
180 {
181     if (likely(reg != 0)) {
182         return cpu_R[reg];
183     }
184     if (!dc->r0_set) {
185         if (dc->r0 == NULL) {
186             dc->r0 = tcg_temp_new_i32();
187         }
188         tcg_gen_movi_i32(dc->r0, 0);
189         dc->r0_set = true;
190     }
191     return dc->r0;
192 }
193 
194 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
195 {
196     if (likely(reg != 0)) {
197         return cpu_R[reg];
198     }
199     if (dc->r0 == NULL) {
200         dc->r0 = tcg_temp_new_i32();
201     }
202     return dc->r0;
203 }
204 
205 static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
206                      void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
207 {
208     TCGv_i32 rd, ra, rb;
209 
210     if (arg->rd == 0 && !side_effects) {
211         return true;
212     }
213 
214     rd = reg_for_write(dc, arg->rd);
215     ra = reg_for_read(dc, arg->ra);
216     rb = reg_for_read(dc, arg->rb);
217     fn(rd, ra, rb);
218     return true;
219 }
220 
221 static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
222                       void (*fn)(TCGv_i32, TCGv_i32))
223 {
224     TCGv_i32 rd, ra;
225 
226     if (arg->rd == 0 && !side_effects) {
227         return true;
228     }
229 
230     rd = reg_for_write(dc, arg->rd);
231     ra = reg_for_read(dc, arg->ra);
232     fn(rd, ra);
233     return true;
234 }
235 
236 static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
237                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
238 {
239     TCGv_i32 rd, ra;
240 
241     if (arg->rd == 0 && !side_effects) {
242         return true;
243     }
244 
245     rd = reg_for_write(dc, arg->rd);
246     ra = reg_for_read(dc, arg->ra);
247     fni(rd, ra, arg->imm);
248     return true;
249 }
250 
251 static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
252                          void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
253 {
254     TCGv_i32 rd, ra, imm;
255 
256     if (arg->rd == 0 && !side_effects) {
257         return true;
258     }
259 
260     rd = reg_for_write(dc, arg->rd);
261     ra = reg_for_read(dc, arg->ra);
262     imm = tcg_constant_i32(arg->imm);
263 
264     fn(rd, ra, imm);
265     return true;
266 }
267 
/*
 * Instantiate trans_* callbacks for the decodetree decoder.  SE is
 * "side effects": when false, an insn targeting r0 emits nothing.
 * The _CFG variants additionally gate the insn on a cpu config field;
 * a false test makes trans_* return false (insn treated as unhandled).
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that takes the cpu env as its second argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
303 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    /* add2 forms the 64-bit sum; its high word is exactly the carry-out. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Step 1: tmp = ina + C, with the intermediate carry in cpu_msr_c. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    /*
     * Step 2: out = tmp + inb; the high word sums the step-1 carry with
     * the carry of tmp + inb (at most one of the two can occur).
     */
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
328 
/* SE == true entries touch MSR[C] and must run even when rd == r0. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
338 
339 static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
340 {
341     tcg_gen_andi_i32(out, ina, ~imm);
342 }
343 
/* Bitwise logicals: no side effects, so rd == r0 makes them nops. */
DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
348 
/* Barrel shift right arithmetic; only the low 5 bits of inb are used. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

/* Barrel shift right logical; only the low 5 bits of inb are used. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

/* Barrel shift left; only the low 5 bits of inb are used. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}
369 
370 static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
371 {
372     /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
373     int imm_w = extract32(imm, 5, 5);
374     int imm_s = extract32(imm, 0, 5);
375 
376     if (imm_w + imm_s > 32 || imm_w == 0) {
377         /* These inputs have an undefined behavior.  */
378         qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
379                       imm_w, imm_s);
380     } else {
381         tcg_gen_extract_i32(out, ina, imm_s, imm_w);
382     }
383 }
384 
385 static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
386 {
387     /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
388     int imm_w = extract32(imm, 5, 5);
389     int imm_s = extract32(imm, 0, 5);
390     int width = imm_w - imm_s + 1;
391 
392     if (imm_w < imm_s) {
393         /* These inputs have an undefined behavior.  */
394         qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
395                       imm_w, imm_s);
396     } else {
397         tcg_gen_deposit_i32(out, out, ina, imm_s, width);
398     }
399 }
400 
/* All barrel-shifter insns require the use_barrel config option. */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
411 
/* Count leading zeros; an all-zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
418 
/*
 * cmp: rd = rb - ra, with bit 31 replaced by the signed test (rb < ra),
 * so the comparison outcome is directly readable from the sign bit.
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

/* cmpu: as gen_cmp, but bit 31 holds the unsigned test (rb < ra). */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
439 
/* Binary FPU operations, all implemented as helpers that take env. */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

/* Unary FPU conversions and sqrt; require use_fpu >= 2. */
ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
471 
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv: rd = rb / ra (signed); the helper takes dividend first. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

/* idivu: rd = rb / ra (unsigned). */
static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
485 
/*
 * IMM: latch the upper 16 bits for the following Type-B insn, both at
 * translation time (ext_imm, consumed by typeb_imm) and at runtime
 * (cpu_imm).  IMM_FLAG is staged through tb_flags_to_set so it applies
 * to the next insn only.  An IMM in a delay slot is invalid.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
496 
/* High half of the signed 32x32 product; the low half is discarded. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();  /* receives the unused low half */
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

/* High half of the unsigned 32x32 product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

/* High half of the signed(ina) x unsigned(inb) product. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

/* The high-half multiplies require the level-2 hardware multiplier. */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
520 
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq: rd = (ra == rb) ? 1 : 0. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

/* pcmpne: rd = (ra != rb) ? 1 : 0. */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
537 
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Carry on subtract is "no borrow": C = (rb >= ra) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* rb - ra + C is computed as rb + ~ra + C, mirroring gen_addc. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
581 
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Arithmetic shift right by one; the shifted-out bit becomes MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right by one through carry: old C enters bit 31, bit 0 exits to C. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Latch the incoming carry before overwriting it below. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    /* out = 32 bits extracted at offset 1 of (tmp:ina). */
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

/* Logical shift right by one; the shifted-out bit becomes MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
609 
/* Swap the two 16-bit halves of the word. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
627 
/* Compute the Type-A effective address ra + rb as a target-long. */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Accesses involving r1 (the stack pointer) may be bounds-checked. */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
650 
651 static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
652 {
653     TCGv ret = tcg_temp_new();
654 
655     /* If any of the regs is r0, set t to the value of the other reg.  */
656     if (ra) {
657         TCGv_i32 tmp = tcg_temp_new_i32();
658         tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
659         tcg_gen_extu_i32_tl(ret, tmp);
660     } else {
661         tcg_gen_movi_tl(ret, (uint32_t)imm);
662     }
663 
664     if (ra == 1 && dc->cfg->stackprot) {
665         gen_helper_stackprot(tcg_env, ret);
666     }
667     return ret;
668 }
669 
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended address for the *ea insns: ra supplies the upper
 * 32 bits and rb the lower 32.  With a 32-bit address size or ra == r0,
 * only rb contributes; the result is masked to cfg->addr_size bits.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            /* ret = ra:rb as a 64-bit value. */
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            /* rb == r0: only the high half is populated. */
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
697 
#ifndef CONFIG_USER_ONLY
/*
 * Stash the data needed to rebuild ESR for a potential unaligned-access
 * fault into the iflags operand of the current insn_start: the ESS flag,
 * the target register (bits 5+), and the store/word-size bits.  The
 * exception unwinder can then recover these for the faulting insn.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif
712 
/*
 * Emit a load of the size/endianness given by MOP into register rd,
 * using MMU regime MEM_INDEX.  REV selects the byte-reversed
 * (lbur/lhur/lwr) semantics.  Always returns true (insn handled).
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            /* XOR flips the sub-word address bits for the reversed lane. */
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
750 
/* lbu: load byte unsigned, register-indexed address. */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* lbur: load byte, byte-reversed addressing. */
static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

/* lbuea: load byte via extended address; privileged, bypasses the MMU. */
static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

/* lbui: load byte unsigned, immediate-offset address. */
static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* lhu: load halfword unsigned. */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

/* lhur: load halfword, byte-reversed addressing and data. */
static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

/* lhuea: load halfword via extended address; privileged, no MMU. */
static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

/* lhui: load halfword unsigned, immediate-offset address. */
static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

/* lw: load word. */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* lwr: load word, byte-reversed data. */
static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

/* lwea: load word via extended address; privileged, no MMU. */
static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

/* lwi: load word, immediate-offset address. */
static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
843 
/*
 * lwx: load word exclusive.  Records the (force-aligned) address and
 * the loaded value so a later swx can validate its reservation, and
 * clears MSR[C].
 */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
862 
/*
 * Emit a store of register rd with the size/endianness given by MOP,
 * using MMU regime MEM_INDEX.  REV selects the byte-reversed
 * (sbr/shr/swr) semantics.  Mirrors do_load; always returns true.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            /* XOR flips the sub-word address bits for the reversed lane. */
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
900 
/* sb: store byte, register-indexed address. */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* sbr: store byte, byte-reversed addressing. */
static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

/* sbea: store byte via extended address; privileged, bypasses the MMU. */
static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

/* sbi: store byte, immediate-offset address. */
static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* sh: store halfword. */
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

/* shr: store halfword, byte-reversed addressing and data. */
static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

/* shea: store halfword via extended address; privileged, no MMU. */
static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

/* shi: store halfword, immediate-offset address. */
static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

/* sw: store word. */
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* swr: store word, byte-reversed data. */
static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

/* swea: store word via extended address; privileged, no MMU. */
static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

/* swi: store word, immediate-offset address. */
static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
993 
/*
 * swx: store word conditional.  Succeeds (C = 0) only if the address
 * matches the lwx reservation and the memory word still holds the value
 * lwx observed (checked with an atomic cmpxchg); otherwise C = 1 and
 * nothing is stored.  The reservation is invalidated either way.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    /* cmpxchg returns the old memory value; a mismatch means it failed. */
    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1041 
1042 static void setup_dslot(DisasContext *dc, bool type_b)
1043 {
1044     dc->tb_flags_to_set |= D_FLAG;
1045     if (type_b && (dc->tb_flags & IMM_FLAG)) {
1046         dc->tb_flags_to_set |= BIMM_FLAG;
1047     }
1048 }
1049 
/*
 * Common code for unconditional branches.
 * dest_rb > 0 selects a register-indirect target via r[dest_rb];
 * otherwise (type-B immediate, or rb == 0 since r0 reads as zero)
 * the target is add_pc + dest_imm.  'abs' makes the target absolute
 * instead of PC-relative, 'delay' arms a delay slot, and a nonzero
 * 'link' register receives the address of this branch insn.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Target known only at runtime; jmp_dest = -1 forces indirect. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1078 
/*
 * Expand both encodings of an unconditional branch: NAME is the type-A
 * (register target) form, NAMEI the type-B (immediate target) form.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

/*               delay  abs    link */
DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1091 
/*
 * Common code for conditional branches (always PC-relative).
 * The branch is taken when 'cond' holds for r[ra] compared against
 * zero.  The final destination (taken target or fall-through) is
 * resolved into cpu_btarget with a movcond.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        /* Target known only at runtime; jmp_dest = -1 forces indirect. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    /* Fall-through address skips the delay slot when one is armed. */
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
1127 
/*
 * Expand the four encodings of a conditional branch: plain, with delay
 * slot (d), immediate (i), and immediate with delay slot (id).
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1144 
/*
 * brk: hardware break (privileged, no delay slot).
 * Jumps to r[rb], optionally linking the break address into r[rd],
 * sets MSR[BIP], and invalidates any lwx reservation.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    /* MSR changed dynamically: leave the TB via the main loop. */
    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1164 
/*
 * brki: break immediate.
 * Only the syscall (0x8) and debug (0x18) vectors are permitted from
 * userspace.  Jumps to the immediate vector, optionally linking the
 * break address into r[rd], and invalidates any lwx reservation.
 * User emulation raises the corresponding QEMU exception directly;
 * softmmu updates MSR instead (BIP except for the debug vector, and
 * for 0x8/0x18 the current UM/VM are saved into UMS/VMS while UM/VM
 * themselves are cleared, i.e. enter privileged real mode).
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        /* UMS/VMS sit one bit above UM/VM, hence the shift. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1211 
/*
 * mbar: memory barrier / sleep.
 * imm bit 1 clear: emit a full data memory barrier.
 * imm bit 4 set: privileged sleep — halt the CPU and raise EXCP_HLT.
 * Always ends the TB (see comment at the bottom).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /*
         * Set cs->halted = 1.  tcg_env points at the embedded env;
         * rebase to the containing MicroBlazeCPU, then index the
         * CPUState parent's 'halted' field.
         */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        /* Resume execution at the insn following the mbar. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1258 
/*
 * Common code for rtsd/rtid/rtbd/rted: return with delay slot.
 * 'to_set' is the DRT{I,B,E}_FLAG (or 0 for plain rtsd) recorded in
 * tb_flags; the delay-slot epilogue in mb_tr_translate_insn uses it to
 * pick the matching MSR restore (do_rti/do_rtb/do_rte).  The variants
 * with a nonzero flag are privileged.  Target is r[ra] + imm, always
 * indirect (jmp_dest = -1).
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1276 
/* Expand one return-from-subroutine variant around do_rts(). */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1285 
1286 static bool trans_zero(DisasContext *dc, arg_zero *arg)
1287 {
1288     /* If opcode_0_illegal, trap.  */
1289     if (dc->cfg->opcode_0_illegal) {
1290         trap_illegal(dc, true);
1291         return true;
1292     }
1293     /*
1294      * Otherwise, this is "add r0, r0, r0".
1295      * Continue to trans_add so that MSR[C] gets cleared.
1296      */
1297     return false;
1298 }
1299 
/*
 * Assemble the architectural MSR value into 'd'.  cpu_msr holds every
 * bit except carry; cpu_msr_c is a 0/1 boolean that must appear in
 * both MSR_C and its copy MSR_CC.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
1309 
/*
 * Common code for msrclr/msrset: read the old MSR into r[rd] (if rd
 * is nonzero), then clear or set the bits in 'imm'.  Userspace may
 * only touch MSR_C; anything else traps.  If any live MSR bit changes,
 * end the TB so dependent tb_flags are re-evaluated.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1346 
/* msrclr: clear the MSR bits given by the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1351 
/* msrset: set the MSR bits given by the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1356 
/*
 * mts: move r[ra] to a special register (privileged, softmmu-only).
 * Always ends the TB, since special-register state can affect
 * translation.  The extended (arg->e) form is only valid for TLBLO.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* Privileged insn: userspace always trapped above. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            /* MMU registers are handled by a helper; pass reg index 0-5. */
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1415 
/*
 * mfs: move a special register into r[rd].
 * The extended (arg->e) form reads the high half of 64-bit registers
 * (EAR) or the extended MMU/PVR state; unknown registers are logged
 * as guest errors and otherwise ignored.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended form reads the high 32 bits of the 64-bit EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        /* PC is known at translation time. */
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended form reads the low 32 bits of the 64-bit EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            /* MMU registers are handled by a helper; pass reg index 0-5. */
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the CPU config, outside CPUMBState. */
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1505 
/*
 * MSR update for return-from-interrupt (rtid delay-slot epilogue):
 * restore UM/VM from their saved copies UMS/VMS (which sit one bit
 * above — hence the shift right) and re-enable interrupts (MSR_IE).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1516 
/*
 * MSR update for return-from-break (rtbd delay-slot epilogue):
 * restore UM/VM from UMS/VMS and clear the break-in-progress bit.
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1526 
/*
 * MSR update for return-from-exception (rted delay-slot epilogue):
 * restore UM/VM from UMS/VMS, re-enable exceptions (MSR_EE) and clear
 * the exception-in-progress bit.
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1537 
/* Insns connected to FSL or AXI stream attached devices.  */

/*
 * Common code for get/getd: read from a stream port into r[rd]
 * (privileged).  The port id comes from r[rb] masked to 4 bits when
 * rb is nonzero, else from the immediate; 'ctrl' carries the
 * instruction's control/flag bits through to the helper.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}
1558 
/* get: stream read, port selected by immediate. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1563 
/* getd: stream read, port selected dynamically by r[rb]. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1568 
/*
 * Common code for put/putd: write r[ra] to a stream port (privileged).
 * Port selection mirrors do_get: r[rb] & 0xf when rb is nonzero,
 * otherwise the immediate.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}
1588 
/* put: stream write, port selected by immediate. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1593 
/* putd: stream write, port selected dynamically by r[rb]. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1598 
/*
 * TranslatorOps hook: initialize per-TB translation state from the
 * CPU and the TB's flags/cs_base.
 */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* cs_base carries the pending IMM prefix value for this TB. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    /* Resuming mid-delay-slot means the branch is unconditionally armed. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Cap max_insns to the number of 4-byte slots left in this page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1617 
/* TranslatorOps hook: no additional per-TB setup needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1621 
/*
 * TranslatorOps hook: record pc and iflags for this insn so the state
 * can be restored on an exception.  MSR_TB_MASK bits are masked out —
 * they are not part of the restartable iflags state.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
1629 
/*
 * TranslatorOps hook: decode and translate one insn, then run the
 * delay-slot epilogue if this insn completed an armed branch.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(cpu_env(cs), dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Any lazily-created r0 temp is dead after this insn. */
    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-shot flags expire now; flags set by this insn take effect. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1706 
/*
 * TranslatorOps hook: emit the TB epilogue according to how
 * translation ended (see the DISAS_* definitions at the top of the
 * file): chained goto_tb for static targets, indirect lookup for
 * dynamic ones, or a full exit when cpu state changed.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1776 
/* TranslatorOps hook: disassemble the guest TB for -d in_asm logging. */
static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}
1783 
/* Callback table consumed by the generic translator_loop(). */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1792 
/* Entry point: translate one TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
1799 
/*
 * Dump the architectural CPU state (pc, msr, iflags, special registers
 * and all 32 GPRs) for QEMU's '-d cpu' / monitor "info registers".
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the translation-state flags symbolically. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* GPRs, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1848 
/*
 * One-time TCG setup: create the TCG globals that mirror CPUMBState
 * fields (GPRs r1-r31 plus the special registers declared at the top
 * of this file).
 */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong-sized, hence the non-i32 allocator. */
    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1889