xref: /openbmc/qemu/target/microblaze/translate.c (revision 65a117da)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32 
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35 
36 
37 #define SIM_COMPAT 0
38 #define DISAS_GNU 1
39 #define DISAS_MB 1
40 #if DISAS_MB && !SIM_COMPAT
41 #  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 #  define LOG_DIS(...) do { } while (0)
44 #endif
45 
46 #define D(x)
47 
/*
 * Extract the inclusive bit-field [start, end] (LSB-numbered) from src.
 * All arguments are parenthesized so that expression arguments expand
 * correctly (the original left start/end bare, miscomputing the width
 * for compound arguments such as "1 + 1").
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
50 
51 /* is_jmp field values */
52 #define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55 
/* Per-CPU TCG globals, indexed into CPUMBState by the translator.  */
static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];     /* General purpose registers r0..r31.  */
static TCGv_i64 cpu_SR[14];    /* Special registers (SR_PC, SR_MSR, ...).  */
static TCGv_i32 env_imm;       /* Value latched by the imm prefix insn.  */
static TCGv_i32 env_btaken;    /* Branch-taken flag for delay slots.  */
static TCGv_i64 env_btarget;   /* Branch target, set by sync_jmpstate().  */
static TCGv_i32 env_iflags;    /* Runtime copy of dc->tb_flags.  */
static TCGv env_res_addr;      /* lwx/swx reservation address.  */
static TCGv_i32 env_res_val;   /* lwx/swx reserved value.  */
65 
66 #include "exec/gen-icount.h"
67 
68 /* This is the state at translation time.  */
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;               /* Address of the insn being translated.  */

    /* Decoder.  */
    int type_b;                /* Nonzero for type-B (immediate) insns.  */
    uint32_t ir;               /* Raw 32-bit instruction word.  */
    uint8_t opcode;
    uint8_t rd, ra, rb;        /* Register operand fields.  */
    uint16_t imm;              /* 16-bit immediate field.  */

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;    /* Clear IMM_FLAG after this insn.  */
    int is_jmp;

/* Values for the jmp field below.  */
#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;           /* Target of a direct branch.  */

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
97 
/* Names of the 32 general purpose registers, for disassembly/dumps.  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
105 
/* Names of the special registers backing cpu_SR[0..13].  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
111 
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        /* Only emit the mov when the runtime copy is actually stale.  */
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
120 
/*
 * Emit code to raise exception 'index' at the current pc.
 * Syncs iflags and SR_PC first, then ends the TB (DISAS_UPDATE).
 */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
131 
/*
 * True if we may chain directly to 'dest' with goto_tb.
 * For system emulation, only within the same guest page as this TB;
 * user mode has no paging concerns, so always true.
 */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
140 
/*
 * Jump to 'dest', chaining TBs via slot 'n' when allowed,
 * otherwise falling back to a full exit.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
152 
/* Read the carry flag (bit 31 of the low MSR word, the MSR_CC alias)
   into d as 0 or 1.  */
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}
158 
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
172 
173 static void write_carryi(DisasContext *dc, bool carry)
174 {
175     TCGv_i32 t0 = tcg_temp_new_i32();
176     tcg_gen_movi_i32(t0, carry);
177     write_carry(dc, t0);
178     tcg_temp_free_i32(t0);
179 }
180 
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 * Note the caller still gets 'cond' back even when no exception was
 * generated (EE clear or illegal-opcode trapping disabled).
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && dc->cpu->cfg.illegal_opcode_exception) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}
194 
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    /* Privileged-insn exception only when EE is set; either way report
       whether the insn is disallowed here.  */
    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}
210 
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
218 
219 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
220 {
221     if (dc->type_b) {
222         if (dc->tb_flags & IMM_FLAG)
223             tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
224         else
225             tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
226         return &env_imm;
227     } else
228         return &cpu_R[dc->rb];
229 }
230 
/*
 * add / addc / addk / addkc and their immediate forms.
 * opcode bit 2 (k) keeps carry (no MSR update); bit 1 (c) adds carry in.
 */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry *before* clobbering rd, since ra may
           alias rd.  */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: result discarded, only the carry flag is produced.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
284 
/*
 * rsub family (d = b - a, note the reversed operands) plus cmp/cmpu,
 * which share the opcode space (type-A with k set and imm bit 0).
 * opcode bit 2 (k) keeps carry; bit 1 (c) borrows carry in.
 */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        /* cmp/cmpu: signed/unsigned compare via helpers.  */
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* Compute the new carry before clobbering rd (ra may alias rd). */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: only the carry flag is produced.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
355 
/*
 * Pattern-compare unit: pcmpbf / pcmpeq / pcmpne, selected by the two
 * low opcode bits.  Mode 1 is not decoded here and aborts.
 */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            /* pcmpeq: rd = (ra == rb).  */
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            /* pcmpne: rd = (ra != rb).  */
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
392 
393 static void dec_and(DisasContext *dc)
394 {
395     unsigned int not;
396 
397     if (!dc->type_b && (dc->imm & (1 << 10))) {
398         dec_pattern(dc);
399         return;
400     }
401 
402     not = dc->opcode & (1 << 1);
403     LOG_DIS("and%s\n", not ? "n" : "");
404 
405     if (!dc->rd)
406         return;
407 
408     if (not) {
409         tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410     } else
411         tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
412 }
413 
414 static void dec_or(DisasContext *dc)
415 {
416     if (!dc->type_b && (dc->imm & (1 << 10))) {
417         dec_pattern(dc);
418         return;
419     }
420 
421     LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
422     if (dc->rd)
423         tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
424 }
425 
426 static void dec_xor(DisasContext *dc)
427 {
428     if (!dc->type_b && (dc->imm & (1 << 10))) {
429         dec_pattern(dc);
430         return;
431     }
432 
433     LOG_DIS("xor r%d\n", dc->rd);
434     if (dc->rd)
435         tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
436 }
437 
/* Read the low 32 bits of MSR into d.  */
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}
442 
/* Write v into MSR, preserving the read-only PVR bit.  */
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}
456 
/*
 * MSR access insns: msrclr/msrset and mts/mfs (move to/from special reg).
 * Decodes the sr number, direction and (for addr_size > 32) the E-bit,
 * then dispatches on the target register.
 */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);     /* Special register number.  */
    to = extract32(dc->imm, 14, 1);     /* 1 = mts, 0 = mfs.  */
    clrset = extract32(dc->imm, 15, 1) == 0;  /* msrclr/msrset form.  */
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        /* Userspace may only touch the carry bit (imm 0 or 4).  */
        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        /* rd receives the old MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        /* End the TB: MSR changes can affect translation flags.  */
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        /* mts: move ra into the special register.  */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        /* mfs: move the special register into rd.  */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    /* eared: high half of the 64-bit EAR.  */
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
                /* fall through -- non-extended reads the low half.  */
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
            case SR_EDR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                /* PVR registers.  */
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* Keep r0 pinned to zero.  */
    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
610 
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        /* muli: low 32 bits of ra * imm.  */
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    /* The *h* variants keep the high half of the 64-bit product in rd;
       tmp receives the discarded low half.  */
    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}
660 
/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;    /* Unsigned variant (idivu).  */
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    /* Note the operand order: rd = op_b / ra.  */
    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* The helper wrote through cpu_R[0] when rd == 0; restore it.  */
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}
682 
/*
 * Barrel shifter unit: bsll/bsra/bsrl plus, in immediate mode,
 * the bsefi (extract) and bsifi (insert) field operations.
 */
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);      /* Shift left.  */
    t = extract32(dc->imm, 9, 1);       /* Arithmetic (when shifting right). */
    imm_w = extract32(dc->imm, 6, 5);   /* Field width.  */
    imm_s = extract32(dc->imm, 0, 5);   /* Shift amount / field start.  */

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        /* bsefi: extract field [imm_s, imm_s + imm_w) from ra.  */
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        /* bsifi: insert bits of ra into rd at [imm_s, imm_w].  */
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        /* Plain shifts; amount is op_b masked to 5 bits.  */
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
744 
/*
 * Bit-manipulation group: src/srl/sra, sign extensions, cache ops,
 * count-leading-zeros and byte/halfword swaps, selected by the low
 * 9 bits of the insn.
 */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            /* Save the old carry (MSR_CC) before write_carry clobbers it. */
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                /* Shift right, rotating the old carry into bit 31.  */
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            /* sext8.  */
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            /* sext16.  */
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            /* Cache ops are privileged nops here.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
            /* clz: count leading zeros (32 for a zero input).  */
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /*swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
827 
/*
 * Downgrade a pending direct branch to indirect state so that a fault
 * taken mid-dslot sees a consistent btaken/btarget in the env.
 */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional direct branches are always taken.  */
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}
838 
/* imm prefix: latch the high 16 bits for the following type-B insn.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    /* Keep IMM_FLAG alive across this insn only.  */
    dc->clear_imm = 0;
}
846 
/*
 * Compute the effective address for a load/store into t.
 * 'ea' selects extended addressing (rb:ra concatenated to 64 bits).
 * Emits a stack-protection check when r1 is involved and enabled.
 */
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            /* 64-bit EA: ra holds the high word, rb the low word.  */
            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        /* Small immediate: fold the sign-extended constant directly.  */
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
915 
/*
 * Loads: l{b,h,w}[u] and the reversed (r), exclusive (lwx) and
 * extended-address (ea) variants.
 */
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    mop = dc->opcode & 3;       /* 0/1/2 -> byte/half/word.  */
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    /* Extended-address loads are privileged.  */
    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* Mirror the byte lane within the word: a ^= 3.  */
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        /* Record the reservation for the matching swx.  */
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
1028 
1029 static void dec_store(DisasContext *dc)
1030 {
1031     TCGv addr;
1032     TCGLabel *swx_skip = NULL;
1033     unsigned int size;
1034     bool rev = false, ex = false, ea = false;
1035     int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1036     MemOp mop;
1037 
1038     mop = dc->opcode & 3;
1039     size = 1 << mop;
1040     if (!dc->type_b) {
1041         ea = extract32(dc->ir, 7, 1);
1042         rev = extract32(dc->ir, 9, 1);
1043         ex = extract32(dc->ir, 10, 1);
1044     }
1045     mop |= MO_TE;
1046     if (rev) {
1047         mop ^= MO_BSWAP;
1048     }
1049 
1050     if (trap_illegal(dc, size > 4)) {
1051         return;
1052     }
1053 
1054     trap_userspace(dc, ea);
1055 
1056     LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1057                                                         ex ? "x" : "",
1058                                                         ea ? "ea" : "");
1059     t_sync_flags(dc);
1060     /* If we get a fault on a dslot, the jmpstate better be in sync.  */
1061     sync_jmpstate(dc);
1062     /* SWX needs a temp_local.  */
1063     addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1064     compute_ldst_addr(dc, ea, addr);
1065     /* Extended addressing bypasses the MMU.  */
1066     mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1067 
1068     if (ex) { /* swx */
1069         TCGv_i32 tval;
1070 
1071         /* swx does not throw unaligned access errors, so force alignment */
1072         tcg_gen_andi_tl(addr, addr, ~3);
1073 
1074         write_carryi(dc, 1);
1075         swx_skip = gen_new_label();
1076         tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1077 
1078         /* Compare the value loaded at lwx with current contents of
1079            the reserved location.
1080            FIXME: This only works for system emulation where we can expect
1081            this compare and the following write to be atomic. For user
1082            emulation we need to add atomicity between threads.  */
1083         tval = tcg_temp_new_i32();
1084         tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
1085                             MO_TEUL);
1086         tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1087         write_carryi(dc, 0);
1088         tcg_temp_free_i32(tval);
1089     }
1090 
1091     if (rev && size != 4) {
1092         /* Endian reverse the address. t is addr.  */
1093         switch (size) {
1094             case 1:
1095             {
1096                 tcg_gen_xori_tl(addr, addr, 3);
1097                 break;
1098             }
1099 
1100             case 2:
1101                 /* 00 -> 10
1102                    10 -> 00.  */
1103                 /* Force addr into the temp.  */
1104                 tcg_gen_xori_tl(addr, addr, 2);
1105                 break;
1106             default:
1107                 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1108                 break;
1109         }
1110     }
1111     tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1112 
1113     /* Verify alignment if needed.  */
1114     if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1115         TCGv_i32 t1 = tcg_const_i32(1);
1116         TCGv_i32 treg = tcg_const_i32(dc->rd);
1117         TCGv_i32 tsize = tcg_const_i32(size - 1);
1118 
1119         tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
1120         /* FIXME: if the alignment is wrong, we should restore the value
1121          *        in memory. One possible way to achieve this is to probe
1122          *        the MMU prior to the memaccess, thay way we could put
1123          *        the alignment checks in between the probe and the mem
1124          *        access.
1125          */
1126         gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1127 
1128         tcg_temp_free_i32(t1);
1129         tcg_temp_free_i32(treg);
1130         tcg_temp_free_i32(tsize);
1131     }
1132 
1133     if (ex) {
1134         gen_set_label(swx_skip);
1135     }
1136 
1137     tcg_temp_free(addr);
1138 }
1139 
1140 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1141                            TCGv_i32 d, TCGv_i32 a)
1142 {
1143     static const int mb_to_tcg_cc[] = {
1144         [CC_EQ] = TCG_COND_EQ,
1145         [CC_NE] = TCG_COND_NE,
1146         [CC_LT] = TCG_COND_LT,
1147         [CC_LE] = TCG_COND_LE,
1148         [CC_GE] = TCG_COND_GE,
1149         [CC_GT] = TCG_COND_GT,
1150     };
1151 
1152     switch (cc) {
1153     case CC_EQ:
1154     case CC_NE:
1155     case CC_LT:
1156     case CC_LE:
1157     case CC_GE:
1158     case CC_GT:
1159         tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1160         break;
1161     default:
1162         cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1163         break;
1164     }
1165 }
1166 
1167 static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
1168 {
1169     TCGv_i64 tmp_btaken = tcg_temp_new_i64();
1170     TCGv_i64 tmp_zero = tcg_const_i64(0);
1171 
1172     tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
1173     tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
1174                         tmp_btaken, tmp_zero,
1175                         pc_true, pc_false);
1176 
1177     tcg_temp_free_i64(tmp_btaken);
1178     tcg_temp_free_i64(tmp_zero);
1179 }
1180 
1181 static void dec_setup_dslot(DisasContext *dc)
1182 {
1183         TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1184 
1185         dc->delayed_branch = 2;
1186         dc->tb_flags |= D_FLAG;
1187 
1188         tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1189         tcg_temp_free_i32(tmp);
1190 }
1191 
/*
 * Decode conditional branches (beq/bne/blt/ble/bge/bgt and the
 * delay-slot "d" variants).  The condition is evaluated on rA; the
 * PC-relative target comes from the immediate or from operand B.
 */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    /* Condition code lives in the rd field; bit 25 selects a dslot.  */
    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target known at translation time; allow direct TB chaining.  */
        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Target depends on a register (or imm-prefixed) value.  */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    /* env_btaken = (rA <cc> 0); consumed by eval_cond_jmp later.  */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
1220 
1221 static void dec_br(DisasContext *dc)
1222 {
1223     unsigned int dslot, link, abs, mbar;
1224 
1225     dslot = dc->ir & (1 << 20);
1226     abs = dc->ir & (1 << 19);
1227     link = dc->ir & (1 << 18);
1228 
1229     /* Memory barrier.  */
1230     mbar = (dc->ir >> 16) & 31;
1231     if (mbar == 2 && dc->imm == 4) {
1232         /* mbar IMM & 16 decodes to sleep.  */
1233         if (dc->rd & 16) {
1234             TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1235             TCGv_i32 tmp_1 = tcg_const_i32(1);
1236 
1237             LOG_DIS("sleep\n");
1238 
1239             t_sync_flags(dc);
1240             tcg_gen_st_i32(tmp_1, cpu_env,
1241                            -offsetof(MicroBlazeCPU, env)
1242                            +offsetof(CPUState, halted));
1243             tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
1244             gen_helper_raise_exception(cpu_env, tmp_hlt);
1245             tcg_temp_free_i32(tmp_hlt);
1246             tcg_temp_free_i32(tmp_1);
1247             return;
1248         }
1249         LOG_DIS("mbar %d\n", dc->rd);
1250         /* Break the TB.  */
1251         dc->cpustate_changed = 1;
1252         return;
1253     }
1254 
1255     LOG_DIS("br%s%s%s%s imm=%x\n",
1256              abs ? "a" : "", link ? "l" : "",
1257              dc->type_b ? "i" : "", dslot ? "d" : "",
1258              dc->imm);
1259 
1260     dc->delayed_branch = 1;
1261     if (dslot) {
1262         dec_setup_dslot(dc);
1263     }
1264     if (link && dc->rd)
1265         tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1266 
1267     dc->jmp = JMP_INDIRECT;
1268     if (abs) {
1269         tcg_gen_movi_i32(env_btaken, 1);
1270         tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1271         if (link && !dslot) {
1272             if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1273                 t_gen_raise_exception(dc, EXCP_BREAK);
1274             if (dc->imm == 0) {
1275                 if (trap_userspace(dc, true)) {
1276                     return;
1277                 }
1278 
1279                 t_gen_raise_exception(dc, EXCP_DEBUG);
1280             }
1281         }
1282     } else {
1283         if (dec_alu_op_b_is_small_imm(dc)) {
1284             dc->jmp = JMP_DIRECT;
1285             dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1286         } else {
1287             tcg_gen_movi_i32(env_btaken, 1);
1288             tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1289             tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1290             tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1291         }
1292     }
1293 }
1294 
1295 static inline void do_rti(DisasContext *dc)
1296 {
1297     TCGv_i32 t0, t1;
1298     t0 = tcg_temp_new_i32();
1299     t1 = tcg_temp_new_i32();
1300     tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1301     tcg_gen_shri_i32(t0, t1, 1);
1302     tcg_gen_ori_i32(t1, t1, MSR_IE);
1303     tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1304 
1305     tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1306     tcg_gen_or_i32(t1, t1, t0);
1307     msr_write(dc, t1);
1308     tcg_temp_free_i32(t1);
1309     tcg_temp_free_i32(t0);
1310     dc->tb_flags &= ~DRTI_FLAG;
1311 }
1312 
1313 static inline void do_rtb(DisasContext *dc)
1314 {
1315     TCGv_i32 t0, t1;
1316     t0 = tcg_temp_new_i32();
1317     t1 = tcg_temp_new_i32();
1318     tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1319     tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1320     tcg_gen_shri_i32(t0, t1, 1);
1321     tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1322 
1323     tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1324     tcg_gen_or_i32(t1, t1, t0);
1325     msr_write(dc, t1);
1326     tcg_temp_free_i32(t1);
1327     tcg_temp_free_i32(t0);
1328     dc->tb_flags &= ~DRTB_FLAG;
1329 }
1330 
1331 static inline void do_rte(DisasContext *dc)
1332 {
1333     TCGv_i32 t0, t1;
1334     t0 = tcg_temp_new_i32();
1335     t1 = tcg_temp_new_i32();
1336 
1337     tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1338     tcg_gen_ori_i32(t1, t1, MSR_EE);
1339     tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1340     tcg_gen_shri_i32(t0, t1, 1);
1341     tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1342 
1343     tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1344     tcg_gen_or_i32(t1, t1, t0);
1345     msr_write(dc, t1);
1346     tcg_temp_free_i32(t1);
1347     tcg_temp_free_i32(t0);
1348     dc->tb_flags &= ~DRTE_FLAG;
1349 }
1350 
/*
 * Decode rtsd/rtid/rtbd/rted: return from subroutine / interrupt /
 * break / exception.  All forms execute a delay slot; the privileged
 * variants (i/b/e bit set) trap when attempted from user mode.
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dec_setup_dslot(dc);

    /* Record which MSR restore (if any) to run once the delay slot
       completes; see do_rti/do_rtb/do_rte.  */
    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    /* Branch target = rA + operand B, truncated to 32 bits.  */
    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}
1388 
1389 static int dec_check_fpuv2(DisasContext *dc)
1390 {
1391     if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1392         tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
1393         t_gen_raise_exception(dc, EXCP_HW_EXCP);
1394     }
1395     return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1396 }
1397 
/*
 * Decode floating point insns; the sub-opcode is in bits [9:7].
 * All operations are delegated to helpers that operate on the
 * 32-bit register file.
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            /* fcmp: the comparison variant is in bits [6:4].  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        /* flt/fint/fsqrt require an FPU v2; dec_check_fpuv2 raises the
           exception itself when unavailable.  */
        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1498 
1499 static void dec_null(DisasContext *dc)
1500 {
1501     if (trap_illegal(dc, true)) {
1502         return;
1503     }
1504     qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1505     dc->abort_at_next_insn = 1;
1506 }
1507 
/* Insns connected to FSL or AXI stream attached devices (get/put and
   the dynamic getd/putd forms).  These are privileged.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        /* Immediate form: port id in imm[3:0], control bits in imm[15:10]. */
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        /* Dynamic form: port id taken from rB at runtime.  */
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    /* rd == 0 selects "put" (rA -> stream); otherwise "get" (stream -> rd). */
    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}
1540 
/*
 * Instruction dispatch table: each entry matches the major opcode
 * against bits under mask and names the handler.  Entries are tried
 * in order; the terminating {0, 0} mask matches everything and routes
 * unknown opcodes to dec_null, so it must stay last.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1568 
1569 static inline void decode(DisasContext *dc, uint32_t ir)
1570 {
1571     int i;
1572 
1573     dc->ir = ir;
1574     LOG_DIS("%8.8x\t", dc->ir);
1575 
1576     if (ir == 0) {
1577         trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
1578         /* Don't decode nop/zero instructions any further.  */
1579         return;
1580     }
1581 
1582     /* bit 2 seems to indicate insn type.  */
1583     dc->type_b = ir & (1 << 29);
1584 
1585     dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1586     dc->rd = EXTRACT_FIELD(ir, 21, 25);
1587     dc->ra = EXTRACT_FIELD(ir, 16, 20);
1588     dc->rb = EXTRACT_FIELD(ir, 11, 15);
1589     dc->imm = EXTRACT_FIELD(ir, 0, 15);
1590 
1591     /* Large switch for all insns.  */
1592     for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1593         if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1594             decinfo[i].dec(dc);
1595             break;
1596         }
1597     }
1598 }
1599 
/* generate intermediate code for basic block 'tb'.  */
/*
 * Main translation loop: decode insns one at a time until the TB must
 * end (branch resolved, page boundary, op buffer full, singlestep, or
 * per-TB cpu state changed), then emit the TB epilogue.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    /* Remember the flags we entered with so we can detect changes.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* The TB may start inside a delay slot (D_FLAG carried over).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        /* decode() clears clear_imm for imm-prefix insns so the IMM
           flag survives into the next insn.  */
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot done: apply any pending MSR restore and
                   emit the branch itself.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                 if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    TCGv_i64 tmp_pc = tcg_const_i64(dc->pc);
                    eval_cond_jmp(dc, env_btarget, tmp_pc);
                    tcg_temp_free_i64(tmp_pc);

                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    /* npc is the fallthrough PC, or the direct-branch target below.  */
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ends mid-branch-sequence: state must be written back.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        /* Raise EXCP_DEBUG so the debugger regains control.  */
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock(logfile);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1778 
1779 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1780 {
1781     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1782     CPUMBState *env = &cpu->env;
1783     int i;
1784 
1785     if (!env) {
1786         return;
1787     }
1788 
1789     qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
1790                  env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1791     qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
1792                  "debug=%x imm=%x iflags=%x fsr=%" PRIx64 " "
1793                  "rbtr=%" PRIx64 "\n",
1794                  env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1795                  env->debug, env->imm, env->iflags, env->sregs[SR_FSR],
1796                  env->sregs[SR_BTR]);
1797     qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
1798                  "eip=%d ie=%d\n",
1799                  env->btaken, env->btarget,
1800                  (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1801                  (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1802                  (bool)(env->sregs[SR_MSR] & MSR_EIP),
1803                  (bool)(env->sregs[SR_MSR] & MSR_IE));
1804     for (i = 0; i < 12; i++) {
1805         qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1806         if ((i + 1) % 4 == 0) {
1807             qemu_fprintf(f, "\n");
1808         }
1809     }
1810 
1811     /* Registers that aren't modeled are reported as 0 */
1812     qemu_fprintf(f, "redr=%" PRIx64 " rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1813                     "rtlblo=0 rtlbhi=0\n", env->sregs[SR_EDR]);
1814     qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1815     for (i = 0; i < 32; i++) {
1816         qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1817         if ((i + 1) % 4 == 0)
1818             qemu_fprintf(f, "\n");
1819         }
1820     qemu_fprintf(f, "\n\n");
1821 }
1822 
/*
 * Allocate the TCG globals that mirror fields of CPUMBState: the
 * decoder/branch bookkeeping variables, the reservation (lwx/swx)
 * state, the 32 GPRs and the special registers.
 */
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    /* Reservation state for the lwx/swx exclusive pair.  */
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
1859 
/*
 * Restore CPU state after a TB exit: data[] holds the values recorded
 * by tcg_gen_insn_start(), which for MicroBlaze is just the PC.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1865