/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
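/* For example, EXTRACT_FIELD(ir, 16, 20) yields the 5-bit rA field in
   bits [20:16] of ir, and EXTRACT_FIELD(0xabcd1234, 0, 15) == 0x1234.  */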

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

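/* The index into cpu_SR/special_regnames is the architectural special
   register number: 0 = PC, 1 = MSR, 3 = EAR, 5 = ESR, 7 = FSR,
   11 = BTR and 13 = EDR. The "srN" entries are placeholders for the
   unimplemented numbers in between.  */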
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between the translator and the runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

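/*
 * Direct TB chaining via goto_tb is only safe when the destination is on
 * the same guest page as the current TB: a page-level flush must be able
 * to find and unlink the chained jump. User-mode emulation has no MMU
 * remapping to worry about, so it can always chain.
 */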
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}

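/* Set or clear the carry: write_carryi(dc, 1) sets both MSR_C (bit 2) and
   the read-only alias MSR_CC (bit 31), write_carryi(dc, 0) clears them.  */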
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

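/* Return a pointer to the TCG value for ALU operand b: cpu_R[rb] for a
   type A insn, env_imm for a type B insn. For example, "add r3, r4, r5"
   reads b from cpu_R[5], while "addi r3, r4, -2" materializes the
   sign-extended immediate in env_imm, possibly widened by a preceding
   imm prefix.  */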
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract the carry and complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c; with the carry defaulting to 1 this computes b - a.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
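                /* fall through */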
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
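            /* clz.  */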
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}

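/* The imm prefix parks the high 16 bits of a 32-bit immediate in env_imm;
   IMM_FLAG tells the following type B insn to OR in its own 16-bit
   immediate. For example, "imm 0x1234" followed by "addi r3, r0, 0x5678"
   loads 0x12345678 into r3.  */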
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by load/stores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00
                   i.e. the byte offset within the word is XORed with 3.  */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00
                   i.e. the byte offset within the word is XORed with 3.  */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}

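/*
 * MicroBlaze conditional branches always test rA against zero, so e.g.
 * "beqi rA, imm" becomes eval_cc(dc, CC_EQ, env_btaken, rA), i.e.
 * env_btaken = (rA == 0). eval_cond_jmp then resolves the branch at
 * runtime: SR_PC = btaken ? pc_true : pc_false.
 */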
static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
    TCGv_i64 tmp_btaken = tcg_temp_new_i64();
    TCGv_i64 tmp_zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
    tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
                        tmp_btaken, tmp_zero,
                        pc_true, pc_false);

    tcg_temp_free_i64(tmp_btaken);
    tcg_temp_free_i64(tmp_zero);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}

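/*
 * rti/rtb/rte all restore UM and VM from their saved copies UMS and VMS,
 * which sit one bit position above them in the MSR (hence the shift right
 * by one). They differ only in the status bits they touch: rti re-enables
 * interrupts (IE), rtb clears break-in-progress (BIP), and rte re-enables
 * hardware exceptions (EE) and clears exception-in-progress (EIP).
 */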
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}

/* Return nonzero if the insn may execute, i.e. if the FPU v2 is present.
   Otherwise raise an FPU exception (when exceptions are enabled).
   Callers bail out when this returns zero.  */
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

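/* An insn matches a decinfo entry when (opcode & mask) == bits; the
   all-zero terminator matches anything, routing unknown opcodes to
   dec_null.  */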
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (ir == 0) {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
        /* Don't decode nop/zero instructions any further.  */
        return;
    }

    /* Bit 29 (bit 2 in the MSB-first numbering used by the MicroBlaze
       docs) indicates the insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Dispatch to the decoder for this insn class.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* Indicate that the hash table must be used
                   to find the next TB.  */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* Nothing more to generate.  */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env) {
        return;
    }

    qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
                 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
                 "eip=%d ie=%d\n",
                 env->btaken, env->btarget,
                 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->sregs[SR_MSR] & MSR_EIP),
                 (bool)(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            qemu_fprintf(f, "\n");
    }
    qemu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}