/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
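/*
 * Worked example: for ir = 0x20400004 (addi r2, r0, 4),
 * EXTRACT_FIELD(ir, 26, 31) yields the opcode 0x08 and
 * EXTRACT_FIELD(ir, 21, 25) yields rd = 2; see decode() below
 * for the full field layout.
 */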

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

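/*
 * write_carryi sets or clears both MSR[C] and MSR[CC] from an
 * immediate flag; e.g. the swx sequence below presets the carry
 * to 1 (failure) and clears it again once the store-exclusive
 * is known to have succeeded.
 */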
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

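/*
 * Return the second ALU operand: cpu_R[rb] for type A insns, or
 * env_imm for type B. Without a pending imm prefix, env_imm is
 * loaded with the sign-extended 16-bit immediate; with one, this
 * insn's low 16 bits are OR'd into the prefix's upper half.
 */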
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

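/*
 * Decode add/addc/addk/addkc and their type B forms. Worked
 * example: with r4 = 0xffffffff, r5 = 0 and MSR[C] = 1,
 * "addc r3, r4, r5" yields r3 = 0 with a carry out of 1.
 */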
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

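/*
 * Decode rsub and friends: rd = rb - ra, computed as rb + ~ra + c
 * with the carry-in defaulting to 1. The carry written back is the
 * carry out of that addition, i.e. set when no borrow occurred.
 */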
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* The PVR bit is not writable; mask it out of the new value.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
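            /* sr 0x2000 .. 0x200c map to the PVR registers 0 .. 12.  */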
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
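/*
 * mul produces the low 32 bits of the product; mulh, mulhsu and
 * mulhu keep the high 32 bits of the signed, signed-by-unsigned
 * and unsigned 64-bit products respectively (the low half goes
 * into a scratch temp and is discarded).
 */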
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

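/*
 * Barrel shifter: the s bit selects a left shift, the t bit an
 * arithmetic right shift. Only the low 5 bits of the shift amount
 * are used, so e.g. a shift count of 33 behaves like 1.
 */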
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* sra (0x01) and srl (0x41).  */
            LOG_DIS("%s r%d r%d\n", op == 0x41 ? "srl" : "sra",
                    dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

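/*
 * Before emitting an operation that can fault (a load or store),
 * any direct branch state decoded so far is demoted to an indirect
 * one: the taken flag and target are written to env_btaken and
 * env_btarget so the exception path sees a consistent branch state.
 */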
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

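/*
 * imm loads the prefix value into env_imm and arms IMM_FLAG for
 * exactly one following instruction. Example: "imm 0x1234" followed
 * by "addi r3, r4, 0x5678" applies the immediate 0x12345678.
 */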
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

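/*
 * Compute the effective address of a load/store: ra + rb for the
 * register form (e.g. "lw r3, r4, r5") or ra + sext(imm) for the
 * immediate form (e.g. "lwi r3, r4, 8"). When one operand is r0 or
 * a zero immediate, a pointer to the other register is returned
 * directly and no temporary is allocated.
 */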
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by load/stores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

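/*
 * Store decoder. For swx, the store only happens when the address
 * matches the reservation set up by the previous lwx and the word
 * in memory still holds the value loaded back then; the carry is
 * preset to 1 (failure) and cleared to 0 just before a successful
 * store.
 */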
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
                       cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

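/*
 * Conditional branches compare ra against zero; e.g. "beqid r5, -8"
 * loops back 8 bytes when r5 == 0, executing the delay slot first.
 * The condition is evaluated into env_btaken via eval_cc().
 */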
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

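/*
 * Unconditional branches: bit 20 selects a delay slot, bit 19 an
 * absolute target and bit 18 linking. E.g. "brlid r15, imm" saves
 * the branch PC in r15 and branches PC-relative with a delay slot.
 * mbar (and its sleep encoding) is recognized before the plain
 * branch forms.
 */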
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

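/*
 * The rti/rtb/rte return helpers all restore UM and VM from the
 * shadow copies (UMS/VMS, one bit above them in MSR); in addition
 * rti re-enables interrupts (IE), rtb clears break-in-progress
 * (BIP), and rte re-enables hardware exceptions (EE, clearing EIP).
 */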
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

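/*
 * Return instructions always execute a delay slot. The common form
 * is "rtsd r15, 8": branch to r15 + 8, the address after the call's
 * delay slot. rtid/rtbd/rted additionally schedule the matching MSR
 * restore via the DRTI/DRTB/DRTE flags.
 */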
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero when the FPU v2 insns are available; the callers
       bail out on zero, so the condition must not be inverted.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

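/*
 * Decode dispatch table: decode() scans it in order and calls the
 * first handler whose masked opcode matches. The {0, 0} sentinel
 * matches any opcode, so dec_null() catches everything unknown.
 */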
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1595 
1596 /* generate intermediate code for basic block 'tb'.  */
1597 void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
1598 {
1599     MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1600     CPUState *cs = CPU(cpu);
1601     uint32_t pc_start;
1602     struct DisasContext ctx;
1603     struct DisasContext *dc = &ctx;
1604     uint32_t next_page_start, org_flags;
1605     target_ulong npc;
1606     int num_insns;
1607     int max_insns;
1608 
1609     pc_start = tb->pc;
1610     dc->cpu = cpu;
1611     dc->tb = tb;
1612     org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1613 
1614     dc->is_jmp = DISAS_NEXT;
1615     dc->jmp = 0;
1616     dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1617     if (dc->delayed_branch) {
1618         dc->jmp = JMP_INDIRECT;
1619     }
1620     dc->pc = pc_start;
1621     dc->singlestep_enabled = cs->singlestep_enabled;
1622     dc->cpustate_changed = 0;
1623     dc->abort_at_next_insn = 0;
1624     dc->nr_nops = 0;
1625 
1626     if (pc_start & 3) {
1627         cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1628     }
1629 
1630     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1631     num_insns = 0;
1632     max_insns = tb->cflags & CF_COUNT_MASK;
1633     if (max_insns == 0) {
1634         max_insns = CF_COUNT_MASK;
1635     }
1636     if (max_insns > TCG_MAX_INSNS) {
1637         max_insns = TCG_MAX_INSNS;
1638     }
1639 
1640     gen_tb_start(tb);
1641     do
1642     {
1643         tcg_gen_insn_start(dc->pc);
1644         num_insns++;
1645 
1646 #if SIM_COMPAT
1647         if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1648             tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1649             gen_helper_debug();
1650         }
1651 #endif
1652 
1653         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1654             t_gen_raise_exception(dc, EXCP_DEBUG);
1655             dc->is_jmp = DISAS_UPDATE;
1656             /* The address covered by the breakpoint must be included in
1657                [tb->pc, tb->pc + tb->size) in order to for it to be
1658                properly cleared -- thus we increment the PC here so that
1659                the logic setting tb->size below does the right thing.  */
1660             dc->pc += 4;
1661             break;
1662         }
1663 
1664         /* Pretty disas.  */
1665         LOG_DIS("%8.8x:\t", dc->pc);
1666 
1667         if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1668             gen_io_start();
1669         }
1670 
1671         dc->clear_imm = 1;
1672         decode(dc, cpu_ldl_code(env, dc->pc));
1673         if (dc->clear_imm)
1674             dc->tb_flags &= ~IMM_FLAG;
1675         dc->pc += 4;
1676 
1677         if (dc->delayed_branch) {
1678             dc->delayed_branch--;
1679             if (!dc->delayed_branch) {
1680                 if (dc->tb_flags & DRTI_FLAG)
1681                     do_rti(dc);
1682                 if (dc->tb_flags & DRTB_FLAG)
1683                     do_rtb(dc);
1684                 if (dc->tb_flags & DRTE_FLAG)
1685                     do_rte(dc);
1686                 /* Clear the delay slot flag.  */
1687                 dc->tb_flags &= ~D_FLAG;
1688                 /* Direct jumps can be chained; indirect ones exit via env_btarget.  */
1689                 if (dc->jmp == JMP_INDIRECT) {
1690                     eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1691                     dc->is_jmp = DISAS_JUMP;
1692                 } else if (dc->jmp == JMP_DIRECT) {
1693                     t_sync_flags(dc);
1694                     gen_goto_tb(dc, 0, dc->jmp_pc);
1695                     dc->is_jmp = DISAS_TB_JUMP;
1696                 } else if (dc->jmp == JMP_DIRECT_CC) {
1697                     TCGLabel *l1 = gen_new_label();
1698                     t_sync_flags(dc);
1699                     /* Conditional jmp.  */
1700                     tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1701                     gen_goto_tb(dc, 1, dc->pc);
1702                     gen_set_label(l1);
1703                     gen_goto_tb(dc, 0, dc->jmp_pc);
1704 
1705                     dc->is_jmp = DISAS_TB_JUMP;
1706                 }
1707                 break;
1708             }
1709         }
1710         if (cs->singlestep_enabled) {
1711             break;
1712         }
1713     } while (!dc->is_jmp && !dc->cpustate_changed
1714              && !tcg_op_buf_full()
1715              && !singlestep
1716              && (dc->pc < next_page_start)
1717              && num_insns < max_insns);
1718 
1719     npc = dc->pc;
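    /* If translation stopped with a direct branch still pending (D_FLAG set:
     * the delay slot did not fit into this TB), exit at the current PC with
     * the branch state synced back; otherwise continue at the known target.  */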
1720     if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1721         if (dc->tb_flags & D_FLAG) {
1722             dc->is_jmp = DISAS_UPDATE;
1723             tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1724             sync_jmpstate(dc);
1725         } else
1726             npc = dc->jmp_pc;
1727     }
1728 
1729     if (tb->cflags & CF_LAST_IO)
1730         gen_io_end();
1731     /* Force an update if the per-tb cpu state has changed.  */
1732     if (dc->is_jmp == DISAS_NEXT
1733         && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1734         dc->is_jmp = DISAS_UPDATE;
1735         tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1736     }
1737     t_sync_flags(dc);
1738 
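    /* When single-stepping (e.g. from the gdbstub), never chain TBs: set the
     * PC if the generated code has not already done so and raise EXCP_DEBUG
     * after every TB.  */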
1739     if (unlikely(cs->singlestep_enabled)) {
1740         TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1741 
1742         if (dc->is_jmp != DISAS_JUMP) {
1743             tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1744         }
1745         gen_helper_raise_exception(cpu_env, tmp);
1746         tcg_temp_free_i32(tmp);
1747     } else {
1748         switch (dc->is_jmp) {
1749         case DISAS_NEXT:
1750             gen_goto_tb(dc, 1, npc);
1751             break;
1752         default:
1753         case DISAS_JUMP:
1754         case DISAS_UPDATE:
1755             /* Indicate that the hash table must be used
1756                to find the next TB.  */
1757             tcg_gen_exit_tb(0);
1758             break;
1759         case DISAS_TB_JUMP:
1760             /* Nothing more to generate.  */
1761             break;
1762         }
1763     }
1764     gen_tb_end(tb, num_insns);
1765 
1766     tb->size = dc->pc - pc_start;
1767     tb->icount = num_insns;
1768 
1769 #ifdef DEBUG_DISAS
1770 #if !SIM_COMPAT
1771     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1772         && qemu_log_in_addr_range(pc_start)) {
1773         qemu_log_lock();
1774         qemu_log("--------------\n");
1775 #if DISAS_GNU
1776         log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
1777 #endif
1778         qemu_log("\nisize=%d osize=%d\n",
1779                  dc->pc - pc_start, tcg_op_buf_count());
1780         qemu_log_unlock();
1781     }
1782 #endif
1783 #endif
1784     assert(!dc->abort_at_next_insn);
1785 }
1786 
1787 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1788                        int flags)
1789 {
1790     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1791     CPUMBState *env = &cpu->env;
1792     int i;
1793 
1794     if (!env || !f)
1795         return;
1796 
1797     cpu_fprintf(f, "IN: PC=%x %s\n",
1798                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1799     cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1800              env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1801              env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1802     cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1803              env->btaken, env->btarget,
1804              (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1805              (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1806              (env->sregs[SR_MSR] & MSR_EIP),
1807              (env->sregs[SR_MSR] & MSR_IE));
1808 
1809     for (i = 0; i < 32; i++) {
1810         cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1811         if ((i + 1) % 4 == 0)
1812             cpu_fprintf(f, "\n");
1813     }
1814     cpu_fprintf(f, "\n\n");
1815 }
1816 
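/* Note: the cpu_model string is currently ignored; this always instantiates
 * and realizes the single TYPE_MICROBLAZE_CPU class.  */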
1817 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1818 {
1819     MicroBlazeCPU *cpu;
1820 
1821     cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1822 
1823     object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1824 
1825     return cpu;
1826 }
1827 
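/* Allocate the TCG globals that back guest state: the env pointer, the
 * translator's helper fields (debug, iflags, imm, btarget, btaken and the
 * lwx/swx reservation), all 32 GPRs and the special registers.  */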
1828 void mb_tcg_init(void)
1829 {
1830     int i;
1831 
1832     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1833     tcg_ctx.tcg_env = cpu_env;
1834 
1835     env_debug = tcg_global_mem_new(cpu_env,
1836                     offsetof(CPUMBState, debug),
1837                     "debug0");
1838     env_iflags = tcg_global_mem_new(cpu_env,
1839                     offsetof(CPUMBState, iflags),
1840                     "iflags");
1841     env_imm = tcg_global_mem_new(cpu_env,
1842                     offsetof(CPUMBState, imm),
1843                     "imm");
1844     env_btarget = tcg_global_mem_new(cpu_env,
1845                      offsetof(CPUMBState, btarget),
1846                      "btarget");
1847     env_btaken = tcg_global_mem_new(cpu_env,
1848                      offsetof(CPUMBState, btaken),
1849                      "btaken");
1850     env_res_addr = tcg_global_mem_new(cpu_env,
1851                      offsetof(CPUMBState, res_addr),
1852                      "res_addr");
1853     env_res_val = tcg_global_mem_new(cpu_env,
1854                      offsetof(CPUMBState, res_val),
1855                      "res_val");
1856     for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1857         cpu_R[i] = tcg_global_mem_new(cpu_env,
1858                           offsetof(CPUMBState, regs[i]),
1859                           regnames[i]);
1860     }
1861     for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1862         cpu_SR[i] = tcg_global_mem_new(cpu_env,
1863                           offsetof(CPUMBState, sregs[i]),
1864                           special_regnames[i]);
1865     }
1866 }
1867 
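/* Called when unwinding after a fault: data[] holds the values recorded by
 * tcg_gen_insn_start() above, so data[0] is the guest PC to restore.  */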
1868 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1869                           target_ulong *data)
1870 {
1871     env->sregs[SR_PC] = data[0];
1872 }
1873