/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
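
/*
 * Example: EXTRACT_FIELD(ir, 26, 31) in decode() below shifts ir right
 * by 26 and masks with 0x3f, yielding the 6-bit major opcode.
 */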

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
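
/*
 * In system mode we only chain directly to TBs on the same guest page;
 * a cross-page destination may be remapped or unmapped by the MMU
 * between executions, so gen_goto_tb() below falls back to an indirect
 * exit in that case.
 */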

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
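/*
 * Implementation note: shifting bit 0 up to bit 31 and arithmetically
 * shifting it back down replicates it across the whole word, producing
 * an all-ones or all-zeroes mask that covers MSR_C and MSR_CC at once.
 */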
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

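/*
 * Operand selection: for type A insns the second ALU operand is register
 * rb.  For type B insns it is built in env_imm: either the sign-extended
 * 16-bit immediate on its own, or the low 16 bits OR'd into the high
 * half left there by a preceding imm prefix (see dec_imm() below).
 */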
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

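    /*
     * Note: the new carry is computed by the helper before rd is
     * written, since rd may alias one of the source operands.
     */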
    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract the carry and complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. The carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* The PVR bit is not writable; mask it out of the new value.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* The core lacks msrset/msrclr; treat as a nop.  */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* XXX: should this raise an illegal-opcode exception?  For now
           we fall through and execute the insn anyway.  */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit pc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

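/*
 * Called before ops that may fault (e.g. loads/stores in a delay slot):
 * a direct branch is demoted to an indirect one so that env_btaken and
 * env_btarget hold the branch state the exception path expects.
 */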
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

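/*
 * imm is a prefix insn: it parks its 16-bit operand in the high half of
 * env_imm and sets IMM_FLAG so the next type B insn ORs its own 16-bit
 * immediate into the low half (see dec_alu_op_b() above).  clear_imm is
 * zeroed so the flag survives exactly one following insn.
 */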
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Set to one if r1 is used by load/stores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
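    /*
     * For the low two address bits the transforms below are equivalent
     * to addr ^ 3 (byte accesses) and addr ^ 2 (halfword accesses); the
     * byte case is written out as (addr & ~3) | (3 - (addr & 3)).
     */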
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

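/*
 * lwx/swx emulate load-linked/store-conditional: lwx records the address
 * and the loaded value in env_res_addr/env_res_val, and swx below only
 * performs its store (leaving carry clear) if its address matches the
 * reservation and the memory word still holds the reserved value.
 */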
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
                       cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

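/*
 * Delay slot bookkeeping: decoding a branch with a delay slot sets
 * delayed_branch to 2 (and D_FLAG); the main translation loop decrements
 * it once per insn, and when it hits zero, after the slot insn, the
 * branch is resolved via env_btaken/env_btarget or by direct chaining.
 */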
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar with bit 4 of the rd field set decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

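/*
 * rtid/rtbd/rted restore the user-mode and virtual-mode bits from their
 * shadow copies; the shadow bits sit one position above MSR_UM/MSR_VM,
 * which is why a single shift right by 1 moves them into place.
 */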
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero iff the FPU v2 insns are available.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

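/*
 * The decoder table is scanned in order and the first entry whose mask
 * matches the major opcode wins.  The terminating {0, 0} entry matches
 * anything, so dec_null catches every opcode not claimed above it.
 */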
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) selects
       type B, i.e. immediate, insns.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

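    /*
     * The translation loop stops at the first of: an insn that ends the
     * TB (branch or exception), a change to per-TB cpu state, a full TCG
     * op buffer, single-stepping, the end of the guest page, or the
     * max_insns budget computed above.
     */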
1671     gen_tb_start(tb);
1672     do
1673     {
1674         tcg_gen_insn_start(dc->pc);
1675         num_insns++;
1676 
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

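        /* The IMM prefix flag is normally cleared after every insn;
           dec_imm resets clear_imm so that the loaded immediate survives
           into the following instruction.  */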
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

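        /* Branch decoders set delayed_branch to 2; it reaches zero once
           the delay slot insn has been translated, at which point the
           branch itself is emitted.  */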
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                if (dc->jmp == JMP_INDIRECT) {
                    /* Indirect jump: the target is only known at runtime.  */
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    /* Direct jump: try direct TB chaining.  */
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional direct jump: fall through to dc->pc if
                       the branch was not taken, otherwise chain to the
                       branch target.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

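    /* If translation stopped right after a direct branch, resume at the
       branch target; if it stopped with the delay slot still pending
       (D_FLAG set), fall back to a runtime PC update instead.  */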
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

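    /* When gdb single-stepping, end every TB with a debug exception so
       that control returns to the debugger after each insn.  */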
    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
    }
    gen_tb_end(tb, num_insns);


    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

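/* Dump the architectural CPU state, e.g. for the 'info registers'
   monitor command and -d cpu logging.  */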
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

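/* Instantiate and realize a MicroBlaze CPU.  Note that the cpu_model
   argument is currently unused: the generic CPU type is always created.  */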
MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

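/* Allocate the TCG globals that back the translated code's view of the
   CPU state; intended to run once at start-up.  */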
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

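/* Recover the guest PC from the data recorded by tcg_gen_insn_start()
   when an exception unwinds a partially executed TB.  */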
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1904