/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
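
/*
 * Example: with src = 0x12345678, EXTRACT_FIELD(src, 0, 15) extracts
 * bits [15:0] and yields 0x5678, while EXTRACT_FIELD(src, 26, 31)
 * yields the 6-bit major opcode field, 0x04.
 */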

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
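
/*
 * Note on the chaining contract: tcg_gen_exit_tb((uintptr_t)tb + n)
 * returns a pointer tagged with the jump slot index n, which the main
 * loop uses to patch this TB's goto_tb slot so the two TBs link
 * directly. Cross-page destinations fall back to exit_tb(0), i.e. no
 * chaining, so a patched jump can never bypass a page-table change.
 */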

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_shli_i32(t0, v, 31);
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free_i32(t0);
}
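
/*
 * Note on write_carry: shifting v left by 31 and then arithmetically
 * right by 31 replicates bit 0 across the whole word (v = 1 yields
 * 0xffffffff, v = 2 yields 0), so the single mask with MSR_C | MSR_CC
 * updates both the carry flag and its shadow copy in one step.
 */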

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
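
/*
 * The identity used above is b - a == b + ~a + 1: a plain subtract
 * therefore runs with a carry-in of 1, and subc substitutes the MSR
 * carry. E.g. 5 - 3 becomes 5 + 0xfffffffc + 1 = 2 with a carry-out of
 * 1 (meaning "no borrow" in the MicroBlaze convention).
 */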

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t;

    t = tcg_temp_new_i32();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_i32(t, v, ~MSR_PVR);
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
    }
}
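
/*
 * sync_jmpstate demotes a branch the translator had resolved statically
 * (JMP_DIRECT/JMP_DIRECT_CC) to the runtime representation, making
 * env_btaken and env_btarget valid. Load/store emitters call this so
 * that an MMU fault in a delay slot still sees a consistent branch
 * state when the exception unwinds.
 */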

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
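
/*
 * dc->clear_imm = 0 is what keeps the prefix alive: the translation
 * loop sets clear_imm before decoding each insn and clears IMM_FLAG
 * afterwards unless the insn was imm itself, so the prefix pairs with
 * exactly one following insn.
 */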

static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        if (dc->imm == 0) {
            tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
        } else {
            tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
            tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
        }
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, addr);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
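    /*
     * Example: a reversed halfword access to address 0x1002 gets XORed
     * with 2 below, giving 0x1000, while the MO_BSWAP folded into mop
     * swaps the two data bytes; together they select the mirror-image
     * byte lanes of the same aligned word.
     */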
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, addr);

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
                        cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memory access; that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->pc);
        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_movi_i32(env_btarget, dc->pc);
            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
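
/*
 * Shared trick in do_rti/do_rtb/do_rte above: in the MSR layout the
 * saved copies sit one bit above the live bits (MSR_UMS/MSR_VMS vs.
 * MSR_UM/MSR_VM), so the single shift right by one moves the saved
 * user-mode and VM bits back into place before msr_write.
 */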

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);
    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}
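
/*
 * dec_check_fpuv2 doubles as predicate and fault generator: it returns
 * nonzero (PVR2_USE_FPU2_MASK) when the full FPUv2 is configured, and
 * otherwise raises the FPU exception (if EE is set) and returns 0 so
 * the callers below skip generating the FPUv2-only insns.
 */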

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_fpu) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
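
/*
 * Matching works like a small TCAM: for each entry only the opcode
 * bits selected by mask are compared against bits, and the first hit
 * decodes the insn. The catch-all {0, 0} entry at the end matches any
 * opcode ((op & 0) == 0), routing undecoded insns to dec_null.
 */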

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 29 of the insn word selects the type B (immediate) format.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Walk the decoder table; the first matching entry decodes the insn.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

1666         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1667             t_gen_raise_exception(dc, EXCP_DEBUG);
1668             dc->is_jmp = DISAS_UPDATE;
1669             /* The address covered by the breakpoint must be included in
1670                [tb->pc, tb->pc + tb->size) in order to for it to be
1671                properly cleared -- thus we increment the PC here so that
1672                the logic setting tb->size below does the right thing.  */
1673             dc->pc += 4;
1674             break;
1675         }
1676 
        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

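        /*
         * dec_imm leaves clear_imm at 0 and sets IMM_FLAG, so an "imm"
         * prefix survives exactly the one insn that follows it; the
         * test above drops the flag for every other insn.
         *
         * Branch decoders arm dc->delayed_branch (nominally to 2), so
         * the countdown below emits the actual jump only once the
         * delay slot insn has been translated.
         */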
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* Indirect jumps must be resolved at run time; direct
                   jumps can attempt TB chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp: not-taken falls through to dc->pc.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

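    /*
     * Epilogue: npc is where execution should continue after this TB.
     * If a direct branch's delay slot spilled past the end of the TB
     * (D_FLAG still set), sync the jump state and let it resolve at run
     * time; otherwise the target is static and npc can point at it.
     */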
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

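    /*
     * TB exit paths: DISAS_NEXT may chain straight to the successor TB
     * via gen_goto_tb(); DISAS_JUMP and DISAS_UPDATE leave SR_PC valid
     * and return to the main loop through exit_tb(0); DISAS_TB_JUMP has
     * already emitted its own goto_tb.  Under single-stepping we raise
     * EXCP_DEBUG instead so the debugger regains control after every
     * insn.
     */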
    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

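/*
 * Allocate the TCG globals declared at the top of this file, each backed
 * by a field of CPUMBState.  TCG can then cache these values in host
 * registers within a TB and spill them back to env as needed.
 */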
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

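/*
 * data[0] is the PC recorded by tcg_gen_insn_start() during translation;
 * it is the only per-insn state this target saves, so rebuilding SR_PC
 * is all that unwinding an exception mid-TB requires.
 */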
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}