xref: /openbmc/qemu/target/microblaze/translate.c (revision 0dc4af5c1a0e8d3f73b176f8fd3159e77a4c2492)
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
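/*
 * start and end are inclusive bit positions counted from the LSB;
 * e.g. EXTRACT_FIELD(ir, 26, 31) extracts the 6-bit major opcode
 * (see decode() below).
 */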

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
static TCGv_i32 env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13"
};

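/*
 * Note: dc->tb_flags holds the translator's current view of the iflags
 * (IMM_FLAG, D_FLAG, DRTI/DRTB/DRTE), while dc->synced_flags caches the
 * value last written to env_iflags, so the store below can be skipped
 * when nothing has changed.
 */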
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

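/*
 * Direct TB chaining is only safe when the destination lies on the same
 * guest page as the current TB, so that page-level invalidation also
 * unlinks the chained jump; user-mode emulation has no such constraint.
 */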
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

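/*
 * The carry is mirrored in two MSR bits, MSR_C and MSR_CC; MSR_CC sits
 * in the MSR sign bit, so read_carry can extract it with a plain shift
 * by 31, and write_carry below updates both copies.
 */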
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_shli_i32(t0, v, 31);
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free_i32(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

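/*
 * Return a pointer to operand b: for type A insns this is register rb;
 * for type B insns env_imm is loaded with the sign-extended 16-bit
 * immediate, or the immediate is OR-ed into the upper half set up by a
 * preceding imm prefix (IMM_FLAG).
 */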
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t;

    t = tcg_temp_new_i32();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_i32(t, v, ~MSR_PVR);
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i32(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
    }
}

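/*
 * The imm insn supplies the upper 16 bits of the immediate for the
 * following instruction.  It loads env_imm and sets IMM_FLAG; clear_imm
 * is zeroed so the main loop keeps the flag alive across exactly one
 * extra insn (see dec_alu_op_b).
 */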
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_mov_i32(*t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
            return;
        }
        tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
    } else {
        tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return;
}

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v, addr;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new_i32();
    compute_ldst_addr(dc, &addr);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv_i32 low = tcg_temp_new_i32();

                tcg_gen_andi_i32(low, addr, 3);
                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
                tcg_gen_andi_i32(addr, addr, ~3);
                tcg_gen_or_i32(addr, addr, low);
                tcg_temp_free_i32(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_i32(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_i32(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_i32(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free_i32(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv_i32 addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new_i32() : tcg_temp_new_i32();
    compute_ldst_addr(dc, &addr);

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_i32(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv_i32 low = tcg_temp_new_i32();

                tcg_gen_andi_i32(low, addr, 3);
                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
                tcg_gen_andi_i32(addr, addr, ~3);
                tcg_gen_or_i32(addr, addr, low);
                tcg_temp_free_i32(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_i32(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
                        cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free_i32(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
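        /* Note: bimm records whether this branch insn itself carried an
           imm prefix, so that a fault taken in the delay slot can be
           restarted at the prefix.  */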
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->pc);
        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_movi_i32(env_btarget, dc->pc);
            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

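/*
 * rti/rtb/rte return sequences: the MSR keeps shadow copies of UM and VM
 * (UMS/VMS) one bit to the left of the live bits, so shifting MSR right
 * by one moves the saved values back into place.
 */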
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);
    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Nonzero (PVR2_USE_FPU2_MASK) signals that FPU v2 is available;
       callers skip the insn when this returns zero.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_fpu) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

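/*
 * Decoders are tried in table order: the first entry whose
 * (opcode & mask) == bits wins, and the {0, 0} sentinel maps everything
 * left over to dec_null.
 */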
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
1661             /* The address covered by the breakpoint must be included in
1662                [tb->pc, tb->pc + tb->size) in order to for it to be
1663                properly cleared -- thus we increment the PC here so that
1664                the logic setting tb->size below does the right thing.  */
1665             dc->pc += 4;
1666             break;
1667         }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

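        /* An imm insn loads env_imm, sets IMM_FLAG and clears
           dc->clear_imm so the immediate survives into the following
           insn; any other insn drops a pending immediate here.  */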
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

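        /* Branch insns set delayed_branch to 2; the counter reaches zero
           on the following insn, i.e. once the delay slot has been
           translated.  */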
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* Indirect jumps must be resolved at run time; direct
                   jumps can chain straight to the target TB.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

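    /* dc->pc is now the first untranslated insn.  A pending direct branch
       continues at its target instead, unless we still owe the delay slot
       (D_FLAG), in which case we must sync state and exit.  */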
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

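    /* When gdb is single-stepping, every TB must end by raising
       EXCP_DEBUG so control returns to the debugger after one insn.  */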
    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

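/* Allocate the TCG globals, each backed by a field of CPUMBState, so that
   generated code can read and write cpu registers and flags directly.  */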
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

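/* data[] holds the values recorded by tcg_gen_insn_start() at translation
   time; for MicroBlaze that is a single word, the insn's pc.  */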
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}