/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
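
/* Example: EXTRACT_FIELD(ir, 26, 31) expands to ((ir >> 26) & 0x3f),
   i.e. the six-bit major opcode field that decode() below pulls out
   of each instruction word.  */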

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
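
/*
 * Direct TB chaining is only safe when the branch target lies on the
 * same guest page as the current TB; a cross-page target could be
 * remapped underneath us, so gen_goto_tb falls back to a dynamic
 * lookup via exit_tb(NULL, 0) in that case.
 */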

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
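
/*
 * Note that read_carry above returns the MSR_CC alias (bit 31);
 * write_carry keeps that alias in sync with the architectural carry
 * bit MSR_C (bit 2), so either bit can be read back.
 */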

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
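
/*
 * For example, "addi rD, rA, 0x1234" makes dec_alu_op_b return env_imm
 * holding the sign-extended 0x1234, while the pair "imm 0xdead;
 * addi rD, rA, 0xbeef" ORs the low half into the prefix, yielding the
 * full 32-bit 0xdeadbeef.
 */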

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
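
/*
 * MicroBlaze subtracts are reverse subtracts: rsub computes rB - rA,
 * which is why the operand order above is (b, a), and why
 * b + ~a + 1 (carry defaulting to 1) gives the two's complement
 * difference.
 */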

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
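            /* 0x800/0x802 below are the stack protection bounds
               (slr/shr) consulted by gen_helper_stackprot() for
               r1-based accesses.  */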
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
                /* fall through */
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
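
/*
 * The immediate extract form (bsefi) pulls an imm_w-bit field starting
 * at bit imm_s out of rA; the insert form (bsifi) deposits
 * imm_w - imm_s + 1 bits into rD at bit imm_s. All other encodings are
 * plain shifts by the low five bits of operand b.
 */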

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl (0x41) and sra (0x1).  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
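
/*
 * dc->clear_imm stays 0 here so that the main translation loop keeps
 * IMM_FLAG alive for exactly one following insn; every other insn
 * resets clear_imm before decode(), which drops the prefix again.
 */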

static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by load/stores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
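
/*
 * Note the asymmetry for extended (ea) accesses: rB supplies the low
 * 32 address bits and rA the high bits via tcg_gen_concat_i32_i64,
 * whereas ordinary accesses simply add the two 32-bit registers.
 */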

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}
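
/*
 * lwx/swx above implement load-link/store-conditional with the MSR
 * carry bit as the failure flag (C == 0 on success). A spin-lock
 * acquire sketch, assuming standard MicroBlaze mnemonics:
 *
 *   retry: lwx   r5, r4, r0     ; link to the lock word at r4
 *          bnei  r5, retry      ; non-zero -> lock already held
 *          addik r5, r0, 1
 *          swx   r5, r4, r0     ; conditional store; sets C on failure
 *          addc  r5, r0, r0     ; copy the carry into r5
 *          bnei  r5, retry
 */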

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}
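
/*
 * MicroBlaze conditional branches always test a single register
 * against zero (beq/bne/blt/ble/bge/bgt), hence the setcondi against
 * 0 above; dec_bcc passes env_btaken as the destination d.
 */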

static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
    TCGv_i64 tmp_btaken = tcg_temp_new_i64();
    TCGv_i64 tmp_zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
    tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
                        tmp_btaken, tmp_zero,
                        pc_true, pc_false);

    tcg_temp_free_i64(tmp_btaken);
    tcg_temp_free_i64(tmp_zero);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
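
/*
 * Example: "beqid r3, -8" takes the delay-slot path above
 * (delayed_branch = 2), so the branch resolves only after the next
 * insn; eval_cc leaves env_btaken = (r3 == 0) with jmp_pc = pc - 8.
 */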

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
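
/*
 * All three return paths restore MSR.VM/UM from their saved copies:
 * MSR keeps the saved bits (VMS/UMS) one position above the live
 * bits, so shifting right by one moves the saved values into place
 * before they are masked and merged back in.
 */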

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}
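
/*
 * The canonical function return is "rtsd r15, 8": branch to the link
 * register plus 8, skipping the caller's branch insn and its delay
 * slot.
 */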

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (ir == 0) {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
        /* Don't decode nop/zero instructions any further.  */
        return;
    }

    /* Bit 2 (MSB-first numbering, i.e. 1 << 29) indicates the insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
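
/*
 * decinfo is scanned in order: the first entry whose mask/bits pair
 * matches dc->opcode wins, and the {0, 0} catch-all at the end routes
 * anything unknown to dec_null.
 */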

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);
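
    /*
     * The loop above ends a TB on: a resolved jump, a change to the
     * per-TB cpu state, a full TCG op buffer, single-stepping,
     * crossing a guest page boundary, or exhausting the icount budget
     * (max_insns).
     */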

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                   "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
                   "eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (bool)(env->sregs[SR_MSR] & MSR_EIP),
             (bool)(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}