xref: /openbmc/qemu/target/microblaze/translate.c (revision 1cab5a02ab8144aad2abd001835e49104e4aae0f)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "accel/tcg/cpu-ldst.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 #include "exec/translator.h"
28 #include "exec/translation-block.h"
29 #include "exec/target_page.h"
30 #include "qemu/qemu-print.h"
31 
32 #include "exec/log.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
/*
 * Extract the inclusive bit field [start, end] from src (end >= start).
 * All parameters are fully parenthesized so that expression arguments
 * (e.g. "s + 1") expand correctly.  The field width end - start + 1
 * must be less than 32, or the shift is undefined behavior.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
40 
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

/* TCG globals mirroring the guest-visible CPU state. */
static TCGv_i32 cpu_R[32];      /* general purpose registers r0..r31 */
static TCGv_i32 cpu_pc;         /* program counter */
static TCGv_i32 cpu_msr;        /* machine status register */
static TCGv_i32 cpu_msr_c;      /* MSR carry bit, tracked separately */
static TCGv_i32 cpu_imm;        /* high half latched by the IMM insn */
static TCGv_i32 cpu_bvalue;     /* branch condition value (delay slots) */
static TCGv_i32 cpu_btarget;    /* branch target (delay slots) */
static TCGv_i32 cpu_iflags;     /* runtime copy of IFLAGS_TB_MASK flags */
static TCGv cpu_res_addr;       /* lwx/swx reservation address */
static TCGv_i32 cpu_res_val;    /* lwx/swx reserved value */
60 
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;     /* per-cpu feature configuration */

    /* Lazily allocated temporary standing in for r0 (see reg_for_read). */
    TCGv_i32 r0;
    bool r0_set;                        /* true once r0 holds the value 0 */

    /* Decoder.  */
    uint32_t ext_imm;                   /* high 16 bits from an IMM insn */
    unsigned int tb_flags;              /* current iflags during translation */
    unsigned int tb_flags_to_set;       /* flags to apply to the NEXT insn */
    int mem_index;                      /* current MMU index for ld/st */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
81 
82 static int typeb_imm(DisasContext *dc, int x)
83 {
84     if (dc->tb_flags & IMM_FLAG) {
85         return deposit32(dc->ext_imm, 0, 16, x);
86     }
87     return x;
88 }
89 
90 /* Include the auto-generated decoder.  */
91 #include "decode-insns.c.inc"
92 
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    /* Only emit a store when the flags differ from those the TB began with. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
100 
/* Raise exception INDEX now; translation of this TB ends here. */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

/* As gen_raise_exception, but first synchronize iflags and pc. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
113 
/* Raise a hardware exception, recording ESR_EC as the exception cause. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
121 
/*
 * End the TB with a jump to DEST: chain directly to the next TB when
 * permitted, else update pc and do an indirect TB lookup.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
134 
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* The exception is only delivered when both MSR[EE] is set and the
       cpu is configured to trap illegal opcodes; COND is returned
       regardless so callers can still skip the insn. */
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
147 
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    /* Only traps when actually executing with the user MMU index. */
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
161 
/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    /* D_FLAG is set while translating the insn after a delayed branch. */
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
176 
177 static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
178 {
179     if (likely(reg != 0)) {
180         return cpu_R[reg];
181     }
182     if (!dc->r0_set) {
183         if (dc->r0 == NULL) {
184             dc->r0 = tcg_temp_new_i32();
185         }
186         tcg_gen_movi_i32(dc->r0, 0);
187         dc->r0_set = true;
188     }
189     return dc->r0;
190 }
191 
192 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
193 {
194     if (likely(reg != 0)) {
195         return cpu_R[reg];
196     }
197     if (dc->r0 == NULL) {
198         dc->r0 = tcg_temp_new_i32();
199     }
200     return dc->r0;
201 }
202 
/*
 * Emit a type A (register-register) insn via FN(rd, ra, rb).
 * When SIDE_EFFECTS is false and rd is r0, the insn is a nop and
 * no code is generated at all.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
218 
/*
 * Emit a type A insn with a single source register: FN(rd, ra).
 * As with do_typea, a write to r0 without side effects is a nop.
 */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
233 
/*
 * Emit a type B (register-immediate) insn via FNI(rd, ra, imm), where
 * FNI takes the immediate as a host constant.  arg->imm has already
 * been combined with any IMM prefix by the decoder (typeb_imm).
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
248 
/*
 * Emit a type B insn whose generator needs the immediate as a TCG
 * value rather than a host constant: FN(rd, ra, const(imm)).
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}
265 
/*
 * Glue macros binding a decodetree trans_* entry point to one of the
 * do_type* helpers above.  SE is "side effects": when false and rd is
 * r0, no code is emitted.  The _CFG variants additionally gate on a
 * CPU configuration field; when it is clear the macro returns false
 * and the insn falls through to illegal-opcode handling.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrappers that insert the tcg_env argument required by env helpers. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
301 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    /* add2 produces the 64-bit sum; the high word is the carry out. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
322 
/* Add family: the "k" suffix keeps (does not write) the carry flag. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

/* andn with immediate: AND with the complement of imm. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
342 
/* Barrel shifts: only the low 5 bits of rb select the shift amount. */

/* Barrel shift right, arithmetic. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

/* Barrel shift right, logical. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

/* Barrel shift left, logical. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}
363 
/* Bit-field extract: out = ina[imm_s + imm_w - 1 : imm_s], zero extended. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-field insert: deposit ina into out[imm_w : imm_s]. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        /* Note this is a read-modify-write of the destination. */
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
394 
/* All barrel-shifter insns require the use_barrel configuration bit. */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

/* Count leading zeros; an all-zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
412 
/*
 * Signed compare: out = rb - ra, with bit 31 replaced by the signed
 * (rb < ra) comparison result, per the cmp insn definition.
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

/* As gen_cmp, but with an unsigned comparison in bit 31. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
433 
/*
 * FPU operations all go through env helpers (they update FP status and
 * may raise exceptions), hence the ENV_WRAPPER glue.
 */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversions and sqrt require the extended FPU (use_fpu >= 2). */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
465 
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv computes rb / ra, so the operands are passed reversed. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
479 
/*
 * IMM: latch the high 16 bits for the following type B insn.
 * IMM_FLAG is applied through tb_flags_to_set so it is visible only
 * while translating the next insn.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
490 
/* High half of signed 32x32 multiply; the low half lands in a dead temp. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

/* High half of unsigned 32x32 multiply. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

/* High half of signed (ina) by unsigned (inb) multiply. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

/* High-part multiplies require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
514 
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* Pattern compare equal: out = (ina == inb). */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

/* Pattern compare not-equal: out = (ina != inb). */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
531 
/* Reverse subtract family: rd = rb - ra (note the operand order). */

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* MicroBlaze carry for subtract is "no borrow": rb >= ra. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* rb - ra - !C  ==  rb + ~ra + C  */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
573 
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift right by one, arithmetic; bit 0 shifts out into MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right by one, rotating the old carry into bit 31. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Snapshot the old carry before overwriting it with bit 0. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

/* Shift right by one, logical; bit 0 shifts out into MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
601 
/* Swap the two 16-bit halves of the word. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
619 
/*
 * Compute the effective address ra + rb for a type A load/store,
 * zero-extended to the target address width.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Stack-protection checking applies to accesses through r1 (sp). */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
642 
643 static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
644 {
645     TCGv ret = tcg_temp_new();
646 
647     /* If any of the regs is r0, set t to the value of the other reg.  */
648     if (ra) {
649         TCGv_i32 tmp = tcg_temp_new_i32();
650         tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
651         tcg_gen_extu_i32_tl(ret, tmp);
652     } else {
653         tcg_gen_movi_tl(ret, (uint32_t)imm);
654     }
655 
656     if (ra == 1 && dc->cfg->stackprot) {
657         gen_helper_stackprot(tcg_env, ret);
658     }
659     return ret;
660 }
661 
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended address rb:ra for the *ea insns: with an
 * extended (> 32-bit) address space, ra supplies the upper 32 bits
 * and rb the lower 32; otherwise only rb is used.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
689 
#ifndef CONFIG_USER_ONLY
/*
 * Record unaligned-access details (dest register, size, store flag) in
 * the insn_start parameters, so the unaligned-access exception handler
 * can reconstruct ESR without re-decoding the insn.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif
704 
705 static inline MemOp mo_endian(DisasContext *dc)
706 {
707     return dc->cfg->endi ? MO_LE : MO_BE;
708 }
709 
/*
 * Common load path: load MOP-sized data at ADDR into rd using
 * MEM_INDEX.  REV selects the "reverse" (byte-reversed) variants.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
749 
/*
 * Byte loads.  Plain, reversed (r), extended-address (ea, privileged,
 * system-mode only) and immediate-offset (i) variants.
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* ea accesses bypass the MMU, which user-mode cannot model: nop. */
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
780 
/* Halfword loads, same four variants as the byte loads above. */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
811 
/* Word loads, same four variants as the byte loads above. */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
842 
/*
 * lwx: load word exclusive.  Records the reservation address and the
 * loaded value so a later swx can perform a compare-and-swap.
 */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
862 
/*
 * Common store path: store rd as MOP-sized data at ADDR using
 * MEM_INDEX.  REV selects the "reverse" (byte-reversed) variants.
 * Mirrors do_load above.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
902 
/* Byte stores, same four variants as the loads. */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
933 
/* Halfword stores, same four variants as the loads. */
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
964 
/* Word stores, same four variants as the loads. */
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
995 
/*
 * swx: store word exclusive.  Succeeds (MSR[C] = 0) only when the
 * address matches the lwx reservation and the memory word still holds
 * the reserved value; implemented as an atomic compare-and-swap.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1043 
/*
 * Record that the next insn executes in this branch's delay slot.
 * For type-b (immediate) branches issued under an active imm prefix,
 * also record BIMM so the prefix is carried into the slot.
 */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
1051 
/*
 * Common expansion for unconditional branches.
 *
 * dest_rb >= 0: register-indirect target; dest_rb < 0: immediate target
 * (dest_imm).  abs selects an absolute rather than pc-relative target;
 * delay requests a delay slot; link, if non-zero, names the register
 * that receives the return address (pc of this insn).
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Target only known at run time; jmp_dest = -1 disables goto_tb. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /* rb == 0 reads as zero, so r0-indirect folds into this path. */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1080 
/*
 * Expand both the register (typea) and immediate (typeb) forms of each
 * unconditional branch: br/bra (plain), *d (delay slot), *l* (link).
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1093 
/*
 * Common expansion for conditional branches: branch to the target when
 * (ra cond 0) holds, otherwise fall through to the insn after the
 * (optional) delay slot.  Targets are always pc-relative.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        /* Register target is only known at run time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    /* Not-taken target: skip this insn plus the delay slot, if any. */
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
1129 
/*
 * Expand all four variants of each conditional branch: register or
 * immediate target, with or without a delay slot.
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1146 
/*
 * brk: privileged break to a register-supplied address.  Sets MSR[BIP],
 * optionally links the return address in rd, and cancels any lwx
 * reservation.  Ends the TB since cpu state changed dynamically.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1166 
/*
 * brki: break to an immediate vector.  Vectors 0x8 (syscall) and 0x18
 * (debug) are usable from user mode; anything else is privileged.
 * In user-only mode these raise the corresponding QEMU exception; in
 * system mode MSR is adjusted (BIP, and saved UM/VM for the user
 * vectors) and the TB ends.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Cancel any outstanding lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        /* The debug vector does not set BIP. */
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1213 
/*
 * mbar: memory barrier.  The immediate selects the flavor:
 * bit 1 clear => data barrier; bit 4 set => sleep (privileged).
 * Always ends the TB (see comment at the bottom).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /*
         * Set cs->halted directly: CPUState sits just before the
         * architectural env within MicroBlazeCPU, hence the negative
         * offset from tcg_env.
         */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        /* Resume at the following insn once woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1260 
/*
 * Common expansion for return-type branches (rtsd/rtid/rtbd/rted).
 * to_set carries the DRT*_FLAG identifying which MSR restore to perform
 * after the delay slot (0 for plain rtsd, which is unprivileged).
 * Always has a delay slot; target is ra + imm, known only at run time.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1278 
/* Expand the four return-from-{subroutine,interrupt,break,exception} insns. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1287 
/*
 * Opcode 0: either an illegal-opcode trap (when the core is configured
 * with opcode_0_illegal) or an effective "add r0, r0, r0".
 */
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}
1301 
/*
 * Compose the architectural MSR value into d: cpu_msr holds every bit
 * except carry, which lives separately in the 0/1 boolean cpu_msr_c.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
1311 
/*
 * Common expansion for msrclr/msrset: read the old MSR into rd (if any)
 * and then clear (set == false) or set (set == true) the bits named by
 * the immediate.  Userspace may only touch MSR[C].
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        /* rd receives the pre-modification MSR value. */
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* Non-carry MSR bits changed: return to the main loop. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1348 
/* msrclr: clear the MSR bits named by the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1353 
/* msrset: set the MSR bits named by the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1358 
/*
 * mts: move register ra to a special register (privileged).
 * arg->rs selects the special register; arg->e marks the extended
 * encoding, only valid for TLBLO (0x1003).  Ends the TB because
 * cpu state changed dynamically.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* Privileged: trap_userspace always raised above in user-only mode. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800: /* SLR: stack low register */
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802: /* SHR: stack high register */
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1417 
/*
 * mfs: move a special register into rd.  arg->e marks the extended
 * encoding, used to read the high half of 64-bit registers (EAR) and
 * selected extended registers.  Invalid register numbers are logged
 * and ignored (rd is left unchanged).
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended read returns the high 32 bits of EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended read returns the low 32 bits of EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800: /* SLR: stack low register */
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802: /* SHR: stack high register */
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the cpu config, not in CPUMBState. */
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1507 
/*
 * MSR update for return-from-interrupt: set IE and restore UM/VM from
 * their saved copies UMS/VMS (one bit to the left of UM/VM).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift the saved UMS/VMS down into the UM/VM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1518 
/*
 * MSR update for return-from-break: clear BIP and restore UM/VM from
 * their saved copies UMS/VMS.
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift the saved UMS/VMS down into the UM/VM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1528 
/*
 * MSR update for return-from-exception: set EE, clear EIP, and restore
 * UM/VM from their saved copies UMS/VMS.
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift the saved UMS/VMS down into the UM/VM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1539 
1540 /* Insns connected to FSL or AXI stream attached devices.  */
1541 static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
1542 {
1543     TCGv_i32 t_id, t_ctrl;
1544 
1545     if (trap_userspace(dc, true)) {
1546         return true;
1547     }
1548 
1549     t_id = tcg_temp_new_i32();
1550     if (rb) {
1551         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
1552     } else {
1553         tcg_gen_movi_i32(t_id, imm);
1554     }
1555 
1556     t_ctrl = tcg_constant_i32(ctrl);
1557     gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
1558     return true;
1559 }
1560 
/* get: stream read with immediate interface id. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1565 
/* getd: stream read with interface id taken from rb. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1570 
/*
 * Common expansion for put/putd: write ra to a stream interface.
 * Mirrors do_get; see there for the id/ctrl conventions.  Privileged.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        /* Dynamic interface id: only 16 interfaces exist. */
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}
1590 
/* put: stream write with immediate interface id. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1595 
/* putd: stream write with interface id taken from rb. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1600 
/* Per-TB translator setup: seed DisasContext from the TB and the cpu. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* The pending imm-prefix value is carried in cs_base. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    /* If we enter mid delay-slot, the branch is already unconditional. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Cap the TB at the number of 4-byte insns left on this page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1619 
/* No per-TB work needed before the first insn. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1623 
/*
 * Record pc and iflags for exception unwinding.  The MSR bits mirrored
 * into tb_flags are masked out — presumably because they are restored
 * from cpu_msr itself, not from the insn-start data; confirm against
 * mb_restore_state_to_opc.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}
1630 
/*
 * Translate one insn: fetch, decode, maintain the one-insn flags
 * (imm prefix, delay slot), and — when this insn was a delay slot —
 * complete the pending branch, including any rti/rtb/rte MSR restore.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    /* Byte-swap the fetch when guest and target endianness differ. */
    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Drop the per-insn cached r0 zero constant. */
    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-insn flags expire now; install those the insn just requested. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1708 
/*
 * Emit the TB epilogue according to is_jmp: chain to the next TB where
 * the destination is static, otherwise update pc and return to the
 * main loop (or take the lookup_and_goto_ptr fast path).
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Flush pending iflags changes to env before leaving the TB. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* TB filled up mid straight-line code: chain to the next insn. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already updated dynamically. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1778 
/* Translator callbacks wired into the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};
1786 
/* Entry point: translate one guest TB into TCG ops. */
void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
1793 
/* Dump architectural state (pc, msr, iflags, special and GP regs) to f. */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the translator iflags for readability. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* General-purpose registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1841 }
1842 
/*
 * One-time TCG setup: create the global TCG variables that mirror the
 * CPUMBState fields (GP registers and the special registers used by
 * the translator).
 */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence the non-i32 constructor. */
    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1883