/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
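
/*
 * Worked example (illustrative only, not used by the translator): with
 * the bit numbering above, EXTRACT_FIELD(ir, 21, 25) expands to
 * (ir >> 21) & 0x1f, i.e. the 5-bit rd field of an instruction word.
 */
static inline uint32_t example_extract_rd(uint32_t ir)
{
    /* Same as EXTRACT_FIELD(ir, 21, 25).  */
    return (ir >> 21) & ((1 << (25 - 21 + 1)) - 1);
}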

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
static TCGv_i32 env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_shli_i32(t0, v, 31);
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free_i32(t0);
}
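
/*
 * Plain-C sketch of the update performed by write_carry (illustrative
 * only; the translator emits the TCG equivalent above): bit 0 of v is
 * replicated into both the MSR_C and MSR_CC positions while all other
 * MSR bits are preserved.
 */
static inline uint32_t example_write_carry(uint32_t msr, uint32_t v)
{
    uint32_t rep = (uint32_t)((int32_t)(v << 31) >> 31); /* 0 or ~0 */
    return (msr & ~(MSR_C | MSR_CC)) | (rep & (MSR_C | MSR_CC));
}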

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
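
/*
 * Illustrative sketch of how a full 32-bit operand is formed for type B
 * (immediate) insns (hypothetical helper, mirroring dec_alu_op_b above):
 * an active "imm" prefix supplies the high half latched in env_imm and
 * the low 16 bits are OR:ed in; otherwise the immediate is sign-extended.
 */
static inline int32_t example_build_imm(bool imm_prefix_active,
                                        uint32_t latched_hi, uint16_t imm)
{
    return imm_prefix_active ? (int32_t)(latched_hi | imm)
                             : (int32_t)(int16_t)imm;
}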

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
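
/*
 * Minimal C model of what the carry helper used above computes
 * (illustrative only; the real helper lives outside this file): the
 * carry-out of the 32-bit addition a + b + cin.
 */
static inline uint32_t example_carry_out(uint32_t a, uint32_t b, uint32_t cin)
{
    uint64_t sum = (uint64_t)a + b + cin;
    return (uint32_t)(sum >> 32) & 1;
}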

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
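
/*
 * Illustrative identity behind dec_sub above: two's complement
 * subtraction is rewritten as an addition, d = b + ~a + c, where the
 * carry-in c defaults to 1 so that d == b - a in the plain rsub case.
 */
static inline uint32_t example_sub_via_add(uint32_t a, uint32_t b, uint32_t c)
{
    return b + ~a + c; /* c == 1 gives exactly b - a */
}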

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t;

    t = tcg_temp_new_i32();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_i32(t, v, ~MSR_PVR);
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i32(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
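
/*
 * Plain-C models of the immediate extract/insert forms handled above
 * (illustrative only): bsefi extracts imm_w bits starting at bit imm_s,
 * and bsifi deposits the low bits of ra into rd at bit imm_s.  The
 * preconditions mirror the undefined-input guards in dec_barrel.
 */
static inline uint32_t example_bsefi(uint32_t ra, unsigned imm_s,
                                     unsigned imm_w)
{
    /* Caller guarantees imm_w != 0 and imm_w + imm_s <= 32.  */
    return (ra >> imm_s) & ((1u << imm_w) - 1);
}

static inline uint32_t example_bsifi(uint32_t rd, uint32_t ra,
                                     unsigned imm_s, unsigned width)
{
    /* width = imm_w - imm_s + 1 as computed above; imm_w >= imm_s.  */
    uint32_t mask = (width >= 32) ? ~0u : (((1u << width) - 1) << imm_s);
    return (rd & ~mask) | ((ra << imm_s) & mask);
}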

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* sra (0x1) and srl (0x41).  */
            LOG_DIS("%s r%d r%d\n", op == 0x41 ? "srl" : "sra",
                    dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loads/stores.  */
    bool stackprot = false;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        *t = tcg_temp_new_i32();
        tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new_i32();
        tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new_i32();
        tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv_i32 t, v, *addr;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv_i32 low = tcg_temp_new_i32();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new_i32();
                    tcg_gen_mov_i32(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_i32(low, t, 3);
                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
                tcg_gen_andi_i32(t, t, ~3);
                tcg_gen_or_i32(t, t, low);
                tcg_temp_free_i32(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new_i32();
                    tcg_gen_xori_i32(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_i32(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new_i32();
            tcg_gen_mov_i32(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_i32(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_i32(env_res_addr, *addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free_i32(t);
}
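
/*
 * Plain-C sketch of the address reversal applied to "r" (reverse)
 * accesses above (illustrative only): byte accesses invert the low two
 * address bits, halfword accesses flip bit 1, and word accesses leave
 * the address alone (only the data lanes are byteswapped).
 */
static inline uint32_t example_reverse_addr(uint32_t addr, unsigned size)
{
    switch (size) {
    case 1:
        return (addr & ~3u) | (3 - (addr & 3)); /* equivalent to addr ^ 3 */
    case 2:
        return addr ^ 2;
    default:
        return addr;
    }
}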

static void dec_store(DisasContext *dc)
{
    TCGv_i32 t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new_i32();
    if (ex) { /* swx */
        TCGv_i32 tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_i32(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_i32(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv_i32 low = tcg_temp_new_i32();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new_i32();
                    tcg_gen_mov_i32(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_i32(low, t, 3);
                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
                tcg_gen_andi_i32(t, t, ~3);
                tcg_gen_or_i32(t, t, low);
                tcg_temp_free_i32(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new_i32();
                    tcg_gen_xori_i32(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_i32(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], *addr,
                        cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free_i32(swx_addr);

    if (addr == &t)
        tcg_temp_free_i32(t);
}
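
/*
 * Minimal C model of the lwx/swx reservation protocol implemented by
 * dec_load and dec_store above (illustrative only; the generated code
 * tracks the reservation in env_res_addr/env_res_val): the conditional
 * store succeeds only if both the reserved address and the value seen
 * at lwx time still match.
 */
typedef struct ExampleReservation {
    uint32_t addr; /* word-aligned address reserved by lwx */
    uint32_t val;  /* value loaded by lwx */
} ExampleReservation;

static inline bool example_swx_succeeds(const ExampleReservation *r,
                                        uint32_t addr, uint32_t cur_mem_val)
{
    return r->addr == (addr & ~3u) && r->val == cur_mem_val;
}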

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->pc);
        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar imm & 16 decodes to sleep; the imm lives in the rd field.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_movi_i32(env_btarget, dc->pc);
            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
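
/*
 * Common pattern behind do_rti/do_rtb/do_rte above (illustrative only):
 * the saved UMS/VMS copies sit one bit above UM/VM in the MSR, so a
 * single right shift moves the saved mode bits back into place.
 */
static inline uint32_t example_restore_mode(uint32_t msr)
{
    uint32_t saved = (msr >> 1) & (MSR_VM | MSR_UM);
    return (msr & ~(MSR_VM | MSR_UM)) | saved;
}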

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);
    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero iff the FPU v2 instructions are available.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_fpu) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
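
/*
 * Worked decode example (illustrative; the encoding 0x30630004 is
 * assumed to be "addik r3, r3, 4"): the field extraction above yields
 * opcode=0x0c, rd=3, ra=3, imm=4, and bit 29 set marks a type B insn.
 */
static inline void example_decode_fields(void)
{
    uint32_t ir = 0x30630004;
    uint8_t opcode = EXTRACT_FIELD(ir, 26, 31); /* 0x0c */
    uint8_t rd = EXTRACT_FIELD(ir, 21, 25);     /* 3 */
    uint8_t ra = EXTRACT_FIELD(ir, 16, 20);     /* 3 */
    uint16_t imm = EXTRACT_FIELD(ir, 0, 15);    /* 4 */

    (void)opcode; (void)rd; (void)ra; (void)imm;
}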

/* Generate intermediate code for translation block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "MicroBlaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
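    /* Translate one instruction per iteration until something forces
       the TB to end: a branch or cpu-state change, a full TCG op
       buffer, single-stepping, crossing into the next page, or
       exhausting the instruction budget (see the loop condition at the
       bottom).  */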
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

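        /* A branch in the previous instruction (or a D_FLAG carried in
           from the previous TB) opened a delay slot; delayed_branch
           reaches zero once the slot instruction has been translated,
           at which point we emit any pending rti/rtb/rte sequence and
           the deferred control transfer itself, then end the TB.  */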
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* Direct jumps can be chained to the next TB; indirect
                   jumps go through the btarget register.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp: take the branch to l1 when btaken
                       is set, otherwise fall through to the next insn.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

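    /* If the TB ends on a direct branch whose delay slot could not be
       translated here (D_FLAG still set), write back the PC and let the
       next TB handle the slot; otherwise the next PC is simply the
       known branch target.  */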
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

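    /* When gdb is single-stepping, raise EXCP_DEBUG at the end of every
       TB instead of chaining, so control returns to the debug stub
       after each instruction.  */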
    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

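/* Allocate the TCG globals that back the register file and the
   translator's internal state, so that generated code can access the
   corresponding CPUMBState fields by name.  */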
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

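/* Called to restore architectural state when an exception unwinds a TB
   mid-execution.  data[0] corresponds to the single value recorded by
   tcg_gen_insn_start() above, so only the PC needs to be restored.  */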
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1897