/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
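
/* For example, EXTRACT_FIELD(ir, 26, 31) picks ir[31:26], the 6-bit major
   opcode that decode() at the bottom of this file stores into dc->opcode.  */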

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv env_debug;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
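
/* dc->tb_flags is the translation-time copy of env_iflags; syncing it
   lazily in t_sync_flags() means straight-line code emits no flag
   writes at all.  */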

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
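
/* Direct TB chaining is only safe when the destination lies on the same
   guest page as the current TB (see use_goto_tb above); otherwise we exit
   with an updated PC and let the main loop look the target up again.  */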

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
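
/* Note: MSR_CC, the "carry copy" of MSR_C, sits in the MSR sign bit, which
   is why read_carry above needs only a single shift and why write_carry
   below replicates bit 0 of v across the word before masking.  */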

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Is this an immediate insn without a preceding imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
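
/* For type-b insns, env_imm is either the sign-extended 16-bit immediate
   or, when an imm prefix preceded this insn (IMM_FLAG), the prefix value
   already shifted into the upper half with the low 16 bits OR'd in:
   e.g. "imm 0x1234" followed by "addi rd, ra, 0x5678" uses 0x12345678.  */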

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
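
/* In both dec_add and dec_sub the carry-out (gen_helper_carry) must be
   computed before the result is written back, since rd may alias ra or rb.  */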

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - add the carry into the result: the ISA defines
                   d = b + ~a + c, i.e. (b - a) - 1 + c, so compensate
                   for the +1 implied by the subtraction above.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_subi_tl(cpu_R[dc->rd], cpu_R[dc->rd], 1);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked copy (t, not v) so the PVR bit cannot be set.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* sra (0x1) / srl (0x41).  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit pc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
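
/* Flush the statically known branch state out to env_btaken/env_btarget so
   that, should an insn in the delay slot fault, the exception path sees a
   consistent picture of the pending branch.  */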

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
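
/* dec_imm leaves clear_imm at 0 so that the main translation loop keeps
   IMM_FLAG set across exactly the one insn that follows the prefix.  */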

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Set to one if r1 is used by load/stores (stack protection).  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
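
/* compute_ldst_addr either returns a pointer straight at a register (no
   temp allocated) or returns t itself; callers test "addr == &t" to know
   whether a temp needs freeing.  */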

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the mem access; that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
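
/* A small-immediate bcc has a statically known target (JMP_DIRECT_CC),
   which lets the main loop chain TBs with goto_tb; register-relative
   targets fall back to JMP_INDIRECT via env_btarget.  */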

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep; the mbar immediate is encoded
           in the rd field.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
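
/* The rti/rtb/rte sequences above rely on the MicroBlaze MSR layout
   placing the save copies UMS and VMS one bit above the live UM and VM
   bits, so a single shift-right moves the saved mode back into place.  */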

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

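/* Check for the full FPUv2. If only the basic FPU is configured, raise an
   FPU exception (when EE permits) and return zero so that callers can
   simply bail out.  */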
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
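
/* The DEC_* entries (from microblaze-decode.h) supply the {bits, mask}
   pair matched against the major opcode in decode(); the terminating
   {0, 0} entry matches anything, making dec_null the catch-all.  */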

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 2 (MSB-first numbering, i.e. bit 29 counting from the LSB)
       selects type-b insns, which take an immediate operand.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Match the major opcode against the decoder table.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1631 
1632 /* generate intermediate code for basic block 'tb'.  */
1633 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
1634 {
1635     CPUMBState *env = cs->env_ptr;
1636     MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1637     uint32_t pc_start;
1638     struct DisasContext ctx;
1639     struct DisasContext *dc = &ctx;
1640     uint32_t next_page_start, org_flags;
1641     target_ulong npc;
1642     int num_insns;
1643     int max_insns;
1644 
1645     pc_start = tb->pc;
1646     dc->cpu = cpu;
1647     dc->tb = tb;
1648     org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1649 
1650     dc->is_jmp = DISAS_NEXT;
1651     dc->jmp = 0;
1652     dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1653     if (dc->delayed_branch) {
1654         dc->jmp = JMP_INDIRECT;
1655     }
1656     dc->pc = pc_start;
1657     dc->singlestep_enabled = cs->singlestep_enabled;
1658     dc->cpustate_changed = 0;
1659     dc->abort_at_next_insn = 0;
1660     dc->nr_nops = 0;
1661 
1662     if (pc_start & 3) {
1663         cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1664     }
1665 
1666     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1667     num_insns = 0;
1668     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
1669     if (max_insns == 0) {
1670         max_insns = CF_COUNT_MASK;
1671     }
1672     if (max_insns > TCG_MAX_INSNS) {
1673         max_insns = TCG_MAX_INSNS;
1674     }
1675 
1676     gen_tb_start(tb);
    do {
1679         tcg_gen_insn_start(dc->pc);
1680         num_insns++;
1681 
1682 #if SIM_COMPAT
1683         if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1684             tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1685             gen_helper_debug();
1686         }
1687 #endif
1688 
1689         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1690             t_gen_raise_exception(dc, EXCP_DEBUG);
1691             dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
1696             dc->pc += 4;
1697             break;
1698         }
1699 
1700         /* Pretty disas.  */
1701         LOG_DIS("%8.8x:\t", dc->pc);
1702 
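        /* With icount enabled, an insn that may perform I/O has to be
           the last one in the TB and bracketed by gen_io_start() and
           gen_io_end().  */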
1703         if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1704             gen_io_start();
1705         }
1706 
1707         dc->clear_imm = 1;
1708         decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
1711         dc->pc += 4;
1712 
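        /* The branch insn set delayed_branch to 2; it reaches zero right
           after the delay-slot insn has been translated, at which point
           the pending branch can be resolved.  */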
1713         if (dc->delayed_branch) {
1714             dc->delayed_branch--;
1715             if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
1722                 /* Clear the delay slot flag.  */
1723                 dc->tb_flags &= ~D_FLAG;
                /* Resolve the branch: indirect targets go through
                   env_btarget, while direct jumps can be chained
                   straight to the destination TB.  */
1725                 if (dc->jmp == JMP_INDIRECT) {
1726                     eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1727                     dc->is_jmp = DISAS_JUMP;
1728                 } else if (dc->jmp == JMP_DIRECT) {
1729                     t_sync_flags(dc);
1730                     gen_goto_tb(dc, 0, dc->jmp_pc);
1731                     dc->is_jmp = DISAS_TB_JUMP;
1732                 } else if (dc->jmp == JMP_DIRECT_CC) {
1733                     TCGLabel *l1 = gen_new_label();
1734                     t_sync_flags(dc);
1735                     /* Conditional jmp.  */
1736                     tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1737                     gen_goto_tb(dc, 1, dc->pc);
1738                     gen_set_label(l1);
1739                     gen_goto_tb(dc, 0, dc->jmp_pc);
1740 
1741                     dc->is_jmp = DISAS_TB_JUMP;
1742                 }
1743                 break;
1744             }
1745         }
1746         if (cs->singlestep_enabled) {
1747             break;
1748         }
1749     } while (!dc->is_jmp && !dc->cpustate_changed
1750              && !tcg_op_buf_full()
1751              && !singlestep
1752              && (dc->pc < next_page_start)
1753              && num_insns < max_insns);
1754 
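    /* If the TB ended while a direct branch's delay slot was still
       pending (D_FLAG set), fall back to a dynamic PC update; otherwise
       the next PC is the branch target itself.  */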
1755     npc = dc->pc;
1756     if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1757         if (dc->tb_flags & D_FLAG) {
1758             dc->is_jmp = DISAS_UPDATE;
1759             tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1760             sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
1763     }
1764 
    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
1767     /* Force an update if the per-tb cpu state has changed.  */
1768     if (dc->is_jmp == DISAS_NEXT
1769         && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1770         dc->is_jmp = DISAS_UPDATE;
1771         tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1772     }
1773     t_sync_flags(dc);
1774 
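    /* When single-stepping for gdb, end every TB by raising EXCP_DEBUG
       so that control returns to the debugger after each insn.  */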
1775     if (unlikely(cs->singlestep_enabled)) {
1776         TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1777 
1778         if (dc->is_jmp != DISAS_JUMP) {
1779             tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1780         }
1781         gen_helper_raise_exception(cpu_env, tmp);
1782         tcg_temp_free_i32(tmp);
1783     } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
1799     }
1800     gen_tb_end(tb, num_insns);
1801 
1802     tb->size = dc->pc - pc_start;
1803     tb->icount = num_insns;
1804 
1805 #ifdef DEBUG_DISAS
1806 #if !SIM_COMPAT
1807     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1808         && qemu_log_in_addr_range(pc_start)) {
1809         qemu_log_lock();
1810         qemu_log("--------------\n");
1811         log_target_disas(cs, pc_start, dc->pc - pc_start);
1812         qemu_log_unlock();
1813     }
1814 #endif
1815 #endif
1816     assert(!dc->abort_at_next_insn);
1817 }
1818 
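/* Dump the architectural register state, e.g. for the 'info registers'
   monitor command and -d cpu logging.  */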
1819 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1820                        int flags)
1821 {
1822     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1823     CPUMBState *env = &cpu->env;
1824     int i;
1825 
    if (!env || !f) {
        return;
    }
1828 
1829     cpu_fprintf(f, "IN: PC=%x %s\n",
1830                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1831     cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1832              env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1833              env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1834     cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1835              env->btaken, env->btarget,
1836              (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1837              (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1838              (env->sregs[SR_MSR] & MSR_EIP),
1839              (env->sregs[SR_MSR] & MSR_IE));
1840 
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
1846     cpu_fprintf(f, "\n\n");
1847 }
1848 
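/* Allocate the TCG globals that alias fields of CPUMBState; this is
   called once at startup.  */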
1849 void mb_tcg_init(void)
1850 {
1851     int i;
1852 
1853     env_debug = tcg_global_mem_new(cpu_env,
1854                     offsetof(CPUMBState, debug),
1855                     "debug0");
1856     env_iflags = tcg_global_mem_new(cpu_env,
1857                     offsetof(CPUMBState, iflags),
1858                     "iflags");
1859     env_imm = tcg_global_mem_new(cpu_env,
1860                     offsetof(CPUMBState, imm),
1861                     "imm");
1862     env_btarget = tcg_global_mem_new(cpu_env,
1863                      offsetof(CPUMBState, btarget),
1864                      "btarget");
1865     env_btaken = tcg_global_mem_new(cpu_env,
1866                      offsetof(CPUMBState, btaken),
1867                      "btaken");
1868     env_res_addr = tcg_global_mem_new(cpu_env,
1869                      offsetof(CPUMBState, res_addr),
1870                      "res_addr");
1871     env_res_val = tcg_global_mem_new(cpu_env,
1872                      offsetof(CPUMBState, res_val),
1873                      "res_val");
1874     for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1875         cpu_R[i] = tcg_global_mem_new(cpu_env,
1876                           offsetof(CPUMBState, regs[i]),
1877                           regnames[i]);
1878     }
1879     for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1880         cpu_SR[i] = tcg_global_mem_new(cpu_env,
1881                           offsetof(CPUMBState, sregs[i]),
1882                           special_regnames[i]);
1883     }
1884 }
1885 
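/* tcg_gen_insn_start() above records only the PC, so restoring state
   at a mid-TB exception point just means restoring SR_PC.  */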
1886 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1887                           target_ulong *data)
1888 {
1889     env->sregs[SR_PC] = data[0];
1890 }
1891