/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

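/*
 * Extract the inclusive bit range [start, end] from src, counting from
 * the LSB.  E.g. EXTRACT_FIELD(ir, 26, 31) yields the 6-bit major opcode.
 */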
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

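/*
 * Direct TB chaining (goto_tb) is only safe when the destination lies on
 * the same guest page as this TB; in system mode a cross-page target could
 * be remapped, so we fall back to a full TB lookup.  In user mode the
 * mapping is stable and chaining is always allowed.
 */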
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && dc->cpu->cfg.illegal_opcode_exception) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

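        /*
         * User mode may only touch the carry bit: a mask of 4 selects
         * MSR_C (deposited at bit 2 by write_carry) and a mask of 0 is
         * effectively a nop; anything else is privileged.
         */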
        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
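                /* fall through - non-extended reads share the low word.  */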
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
            case SR_EDR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
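            /* clz - count leading zeros.  */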
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

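/*
 * Materialise a pending direct branch into the runtime state
 * (env_btaken/env_btarget) so that a fault taken inside a delay slot
 * observes consistent branch state; the jump is downgraded to
 * JMP_INDIRECT since the target now lives in env_btarget.
 */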
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}

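/*
 * imm is a prefix insn: it latches the high 16 bits of a 32-bit immediate
 * into env_imm and sets IMM_FLAG so the following type B insn ORs its own
 * 16-bit immediate into the low half (see dec_alu_op_b).  clear_imm is
 * suppressed so the flag survives past this insn.
 */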
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

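/*
 * Compute the effective address of a load/store into t.  With 'ea'
 * (extended addressing) rb supplies the low word and ra the high word of
 * a 64-bit address (plain rb when addr_size is 32); otherwise the usual
 * ra + rb or ra + imm sum applies, with r1-based accesses optionally
 * checked by the stack-protection helper.
 */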
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /*
         * Compare the value loaded at lwx with current contents of
         * the reserved location.
         */
        tval = tcg_temp_new_i32();

        tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
                                   cpu_R[dc->rd], mem_index,
                                   mop);

        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    if (!ex) {
        tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
    }

    /* Verify alignment if needed.  */
    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t1 = tcg_const_i32(1);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess; that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);

        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}

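/*
 * Select the new SR_PC with a movcond on the runtime env_btaken flag:
 * pc_true if the branch was taken, pc_false otherwise.
 */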
static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
    TCGv_i64 tmp_btaken = tcg_temp_new_i64();
    TCGv_i64 tmp_zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
    tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
                        tmp_btaken, tmp_zero,
                        pc_true, pc_false);

    tcg_temp_free_i64(tmp_btaken);
    tcg_temp_free_i64(tmp_zero);
}

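/*
 * Enter a branch delay slot: translate two more insns (the branch itself
 * and its slot) before committing, mark the TB flags with D_FLAG, and
 * record in env->bimm whether the branch was an imm-prefixed type B insn.
 */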
static void dec_setup_dslot(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;

    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
    tcg_temp_free_i32(tmp);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        uint16_t mbar_imm = dc->rd;

        LOG_DIS("mbar %d\n", mbar_imm);

        /* Data access memory barrier.  */
        if ((mbar_imm & 2) == 0) {
            tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
        }

        /* mbar IMM & 16 decodes to sleep.  */
        if (mbar_imm & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            if (trap_userspace(dc, true)) {
                /* Sleep is a privileged instruction.  */
                return;
            }

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}

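/*
 * rtid/rtbd/rted return sequences: restore UM/VM from their saved copies
 * (UMS/VMS, one bit position above in MSR, hence the shift right by 1)
 * and update the relevant enable bit (IE for rtid, BIP cleared for rtbd,
 * EE set and EIP cleared for rted).
 */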
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dec_setup_dslot(dc);

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

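/*
 * Table-driven decode: the first decinfo entry whose mask/bits pair
 * matches the major opcode wins; the all-zero terminator always matches,
 * routing unknown opcodes to dec_null.
 */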
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (ir == 0) {
        trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
        /* Don't decode nop/zero instructions any further.  */
        return;
    }

    /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) selects type B.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

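        /*
         * delayed_branch counts down across the branch insn and its delay
         * slot (it is 2 with a dslot, 1 without).  When it hits zero the
         * branch is committed, after running any pending rtid/rtbd/rted
         * MSR side effects.
         */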
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    TCGv_i64 tmp_pc = tcg_const_i64(dc->pc);
                    eval_cond_jmp(dc, env_btarget, tmp_pc);
                    tcg_temp_free_i64(tmp_pc);

                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

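    /*
     * If translation stopped between a direct branch and its delay slot
     * (D_FLAG still set), sync the branch state into env and exit via an
     * update; otherwise the next PC is statically the branch target.
     */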
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock(logfile);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env) {
        return;
    }

    qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 " "
                 "rbtr=%" PRIx64 "\n",
                 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                 env->debug, env->imm, env->iflags, env->sregs[SR_FSR],
                 env->sregs[SR_BTR]);
    qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
                 "eip=%d ie=%d\n",
                 env->btaken, env->btarget,
                 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->sregs[SR_MSR] & MSR_EIP),
                 (bool)(env->sregs[SR_MSR] & MSR_IE));
    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
        if ((i + 1) % 4 == 0) {
            qemu_fprintf(f, "\n");
        }
    }

    /* Registers that aren't modeled are reported as 0 */
    qemu_fprintf(f, "redr=%" PRIx64 " rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
                    "rtlblo=0 rtlbhi=0\n", env->sregs[SR_EDR]);
    qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            qemu_fprintf(f, "\n");
        }
    }
    qemu_fprintf(f, "\n\n");
}

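/*
 * Register the TCG globals that mirror fields of CPUMBState; these are
 * created once at startup and shared by all subsequent translations.
 */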
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1883