xref: /openbmc/qemu/target/openrisc/translate.c (revision 3ae8a100)
1 /*
2  * OpenRISC translation
3  *
4  * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
5  *                         Feng Gao <gf91597@gmail.com>
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26 #include "qemu-common.h"
27 #include "qemu/log.h"
28 #include "qemu/bitops.h"
29 #include "exec/cpu_ldst.h"
30 #include "exec/translator.h"
31 
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34 #include "exec/gen-icount.h"
35 
36 #include "trace-tcg.h"
37 #include "exec/log.h"
38 
/* Log disassembly of the current instruction (CPU_LOG_TB_IN_ASM),
   prefixed with its guest PC.  Requires a DisasContext named "dc" in
   scope at the expansion site.  */
#define LOG_DIS(str, ...) \
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->base.pc_next,    \
                  ## __VA_ARGS__)
42 
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

/* Per-translation-block state carried through the decode loop.  */
typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;           /* MMU index used for guest memory accesses */
    uint32_t tb_flags;          /* SR bits captured at translation time
                                   (e.g. SR_OVE, tested in gen_ove_*) */
    uint32_t delayed_branch;    /* set to 2 by branch insns; counts down to
                                   flag the delay slot */
} DisasContext;
54 
55 /* Include the auto-generated decoder.  */
56 #include "decode.inc.c"
57 
/* TCG globals backed by CPUOpenRISCState fields; allocated once in
   openrisc_translate_init().  */
static TCGv cpu_sr;             /* supervision register */
static TCGv cpu_R[32];          /* GPRs, aliasing shadow_gpr bank 0 */
static TCGv cpu_R0;             /* the real r0 global; see check_r0_write */
static TCGv cpu_pc;
static TCGv jmp_pc;            /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;            /* env->ppc (previous pc field) */
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;      /* l.lwa reservation address (-1 = none) */
static TCGv cpu_lock_value;     /* value loaded by the last l.lwa */
static TCGv_i32 fpcsr;          /* FP control/status register */
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;      /* env->dflag (delay-slot flag) */
72 
/* Allocate the TCG globals declared above, binding each to its
   CPUOpenRISCState field.  Called once during CPU initialization.  */
void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    /* GPR globals map shadow register bank 0.  */
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState,
                                               shadow_gpr[0][i]),
                                      regnames[i]);
    }
    /* Remember the real r0: cpu_R[0] may be redirected to a temporary
       and later restored via check_r0_write.  */
    cpu_R0 = cpu_R[0];
}
120 
121 static void gen_exception(DisasContext *dc, unsigned int excp)
122 {
123     TCGv_i32 tmp = tcg_const_i32(excp);
124     gen_helper_exception(cpu_env, tmp);
125     tcg_temp_free_i32(tmp);
126 }
127 
/* Raise EXCP_ILLEGAL at the current instruction and terminate the TB.  */
static void gen_illegal_exception(DisasContext *dc)
{
    /* Point pc at the offending instruction before raising.  */
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}
134 
135 /* not used yet, open it when we need or64.  */
136 /*#ifdef TARGET_OPENRISC64
137 static void check_ob64s(DisasContext *dc)
138 {
139     if (!(dc->flags & CPUCFGR_OB64S)) {
140         gen_illegal_exception(dc);
141     }
142 }
143 
144 static void check_of64s(DisasContext *dc)
145 {
146     if (!(dc->flags & CPUCFGR_OF64S)) {
147         gen_illegal_exception(dc);
148     }
149 }
150 
151 static void check_ov64s(DisasContext *dc)
152 {
153     if (!(dc->flags & CPUCFGR_OV64S)) {
154         gen_illegal_exception(dc);
155     }
156 }
157 #endif*/
158 
/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.
   NOTE(review): cpu_R[0] appears to be redirected to a temporary under
   TB_FLAGS_R0_0 (see the comment in trans_l_swa); restoring cpu_R0 here
   makes the write land in the real r0 — confirm against the tb_start
   hook that performs the redirection.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)
167 
168 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
169 {
170     if (unlikely(dc->base.singlestep_enabled)) {
171         return false;
172     }
173 
174 #ifndef CONFIG_USER_ONLY
175     return (dc->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
176 #else
177     return true;
178 #endif
179 }
180 
181 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
182 {
183     if (use_goto_tb(dc, dest)) {
184         tcg_gen_movi_tl(cpu_pc, dest);
185         tcg_gen_goto_tb(n);
186         tcg_gen_exit_tb(dc->base.tb, n);
187     } else {
188         tcg_gen_movi_tl(cpu_pc, dest);
189         if (dc->base.singlestep_enabled) {
190             gen_exception(dc, EXCP_DEBUG);
191         }
192         tcg_gen_exit_tb(NULL, 0);
193     }
194 }
195 
196 static void gen_ove_cy(DisasContext *dc)
197 {
198     if (dc->tb_flags & SR_OVE) {
199         gen_helper_ove_cy(cpu_env);
200     }
201 }
202 
203 static void gen_ove_ov(DisasContext *dc)
204 {
205     if (dc->tb_flags & SR_OVE) {
206         gen_helper_ove_ov(cpu_env);
207     }
208 }
209 
210 static void gen_ove_cyov(DisasContext *dc)
211 {
212     if (dc->tb_flags & SR_OVE) {
213         gen_helper_ove_cyov(cpu_env);
214     }
215 }
216 
/* dest = srca + srcb, computing SR[CY] (carry out) and SR[OV] (signed
   overflow, kept in the sign bit), then raising the range exception if
   enabled.  */
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    /* Double-width add: the high word is the carry out.  */
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    /* OV = ~(srca ^ srcb) & (res ^ srcb): overflow iff the operands
       agree in sign and the result disagrees.  */
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    /* Write dest last so it may alias srca/srcb.  */
    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
233 
/* dest = srca + srcb + SR[CY], computing carry and signed overflow as
   in gen_add.  */
static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    /* Two-step add so each step's carry out can be accumulated:
       first srca + cy_in, then + srcb.  */
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    /* OV = ~(srca ^ srcb) & (res ^ srcb), as in gen_add.  */
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    /* Write dest last so it may alias srca/srcb.  */
    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
251 
252 static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
253 {
254     TCGv res = tcg_temp_new();
255 
256     tcg_gen_sub_tl(res, srca, srcb);
257     tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
258     tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
259     tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
260     tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
261 
262     tcg_gen_mov_tl(dest, res);
263     tcg_temp_free(res);
264 
265     gen_ove_cyov(dc);
266 }
267 
/* dest = srca * srcb (signed), setting SR[OV] when the product does
   not fit in TARGET_LONG_BITS.  */
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    /* The product fits iff the high half equals the sign extension of
       the low half.  */
    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);

    /* Convert the 0/1 flag to 0/-1 so the sign bit carries OV.  */
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
280 
281 static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
282 {
283     tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
284     tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
285 
286     gen_ove_cy(dc);
287 }
288 
/* dest = srca / srcb (signed), setting SR[OV] on divide-by-zero.  */
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1:
       when srcb == 0, ov == 1, so srcb | ov == 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    /* Convert the 0/1 flag to 0/-1 so the sign bit carries OV.  */
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
303 
/* dest = srca / srcb (unsigned), setting SR[CY] on divide-by-zero.  */
static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1:
       when srcb == 0, cy == 1, so srcb | cy == 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}
317 
/* MAC = srca * srcb (signed, full width into the 64-bit accumulator),
   setting SR[OV] when the 64-bit target product overflows.  */
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        /* 32x32 -> 64 cannot overflow the accumulator.  */
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        /* Overflow iff the high half differs from the sign extension
           of the low half (cf. gen_mul).  */
        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
343 
/* MAC = srca * srcb (unsigned, full width into the 64-bit accumulator),
   setting SR[CY] when the 64-bit target product overflows.  */
static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        /* 32x32 -> 64 cannot overflow the accumulator.  */
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        /* Unsigned overflow iff the high half is non-zero.  */
        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
367 
/* MAC += srca * srcb (signed multiply-accumulate), setting SR[OV] on
   signed overflow of the accumulate step.  */
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    /* OV64 = ~(mac_old ^ t1) & (t1 ^ mac_new), i.e. addends agreed in
       sign and the sum disagrees; t2 must be computed before mac is
       updated.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    /* Keep only the sign-carrying high half on a 32-bit target.  */
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
393 
394 static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
395 {
396     TCGv_i64 t1 = tcg_temp_new_i64();
397     TCGv_i64 t2 = tcg_temp_new_i64();
398 
399     tcg_gen_extu_tl_i64(t1, srca);
400     tcg_gen_extu_tl_i64(t2, srcb);
401     tcg_gen_mul_i64(t1, t1, t2);
402     tcg_temp_free_i64(t2);
403 
404     /* Note that overflow is only computed during addition stage.  */
405     tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
406     tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
407     tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
408     tcg_temp_free_i64(t1);
409 
410     gen_ove_cy(dc);
411 }
412 
/* MAC -= srca * srcb (signed multiply-subtract), setting SR[OV] on
   signed overflow of the subtract step.  */
static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    /* OV64 = (mac_old ^ t1) & (t1 ^ mac_new); t2 must be computed
       before mac is updated.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    /* Keep only the sign-carrying high half on a 32-bit target.  */
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
438 
/* MAC -= srca * srcb (unsigned multiply-subtract), setting SR[CY] on
   unsigned underflow (borrow) of the subtract step.  */
static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    /* Borrow iff the old accumulator is below the subtrahend; must be
       tested before mac is updated.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
457 
/* l.add: rD = rA + rB, with carry/overflow flags.  */
static bool trans_l_add(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.add r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.addc: rD = rA + rB + SR[CY].  */
static bool trans_l_addc(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.addc r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.sub: rD = rA - rB, with borrow/overflow flags.  */
static bool trans_l_sub(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.sub r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    gen_sub(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}
481 
/* l.and: rD = rA & rB.  */
static bool trans_l_and(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.and r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_and_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.or: rD = rA | rB.  */
static bool trans_l_or(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.or r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_or_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.xor: rD = rA ^ rB.  */
static bool trans_l_xor(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.xor r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_xor_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.sll: rD = rA << rB (shift count taken from rB unmasked).  */
static bool trans_l_sll(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.sll r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_shl_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.srl: rD = rA >> rB (logical).  */
static bool trans_l_srl(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.srl r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_shr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.sra: rD = rA >> rB (arithmetic).  */
static bool trans_l_sra(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.sra r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_sar_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.ror: rD = rA rotated right by rB.  */
static bool trans_l_ror(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.ror r%d, r%d, r%d\n", a->d, a->a, a->b);
    check_r0_write(a->d);
    tcg_gen_rotr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}
537 
/* l.exths: rD = sign-extended low 16 bits of rA.  */
static bool trans_l_exths(DisasContext *dc, arg_da *a, uint32_t insn)
{
    LOG_DIS("l.exths r%d, r%d\n", a->d, a->a);
    check_r0_write(a->d);
    tcg_gen_ext16s_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

/* l.extbs: rD = sign-extended low 8 bits of rA.  */
static bool trans_l_extbs(DisasContext *dc, arg_da *a, uint32_t insn)
{
    LOG_DIS("l.extbs r%d, r%d\n", a->d, a->a);
    check_r0_write(a->d);
    tcg_gen_ext8s_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

/* l.exthz: rD = zero-extended low 16 bits of rA.  */
static bool trans_l_exthz(DisasContext *dc, arg_da *a, uint32_t insn)
{
    LOG_DIS("l.exthz r%d, r%d\n", a->d, a->a);
    check_r0_write(a->d);
    tcg_gen_ext16u_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

/* l.extbz: rD = zero-extended low 8 bits of rA.  */
static bool trans_l_extbz(DisasContext *dc, arg_da *a, uint32_t insn)
{
    LOG_DIS("l.extbz r%d, r%d\n", a->d, a->a);
    check_r0_write(a->d);
    tcg_gen_ext8u_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}
569 
570 static bool trans_l_cmov(DisasContext *dc, arg_dab *a, uint32_t insn)
571 {
572     TCGv zero;
573     LOG_DIS("l.cmov r%d, r%d, r%d\n", a->d, a->a, a->b);
574 
575     check_r0_write(a->d);
576     zero = tcg_const_tl(0);
577     tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[a->d], cpu_sr_f, zero,
578                        cpu_R[a->a], cpu_R[a->b]);
579     tcg_temp_free(zero);
580     return true;
581 }
582 
/* l.ff1: rD = 1-based position of the least significant set bit of rA,
   or 0 if rA is zero (ctz of 0 is forced to -1, then +1 gives 0).  */
static bool trans_l_ff1(DisasContext *dc, arg_da *a, uint32_t insn)
{
    LOG_DIS("l.ff1 r%d, r%d\n", a->d, a->a);

    check_r0_write(a->d);
    tcg_gen_ctzi_tl(cpu_R[a->d], cpu_R[a->a], -1);
    tcg_gen_addi_tl(cpu_R[a->d], cpu_R[a->d], 1);
    return true;
}

/* l.fl1: rD = 1-based position of the most significant set bit of rA,
   or 0 if rA is zero (clz of 0 is forced to TARGET_LONG_BITS, and
   TARGET_LONG_BITS - clz gives 0).  */
static bool trans_l_fl1(DisasContext *dc, arg_da *a, uint32_t insn)
{
    LOG_DIS("l.fl1 r%d, r%d\n", a->d, a->a);

    check_r0_write(a->d);
    tcg_gen_clzi_tl(cpu_R[a->d], cpu_R[a->a], TARGET_LONG_BITS);
    tcg_gen_subfi_tl(cpu_R[a->d], TARGET_LONG_BITS, cpu_R[a->d]);
    return true;
}
602 
/* l.mul: rD = rA * rB (signed), setting OV.  */
static bool trans_l_mul(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.mul r%d, r%d, r%d\n", a->d, a->a, a->b);

    check_r0_write(a->d);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.mulu: rD = rA * rB (unsigned), setting CY.  */
static bool trans_l_mulu(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.mulu r%d, r%d, r%d\n", a->d, a->a, a->b);

    check_r0_write(a->d);
    gen_mulu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.div: rD = rA / rB (signed), setting OV on divide-by-zero.  */
static bool trans_l_div(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.div r%d, r%d, r%d\n", a->d, a->a, a->b);

    check_r0_write(a->d);
    gen_div(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.divu: rD = rA / rB (unsigned), setting CY on divide-by-zero.  */
static bool trans_l_divu(DisasContext *dc, arg_dab *a, uint32_t insn)
{
    LOG_DIS("l.divu r%d, r%d, r%d\n", a->d, a->a, a->b);

    check_r0_write(a->d);
    gen_divu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.muld: MAC = rA * rB (signed, full width).  */
static bool trans_l_muld(DisasContext *dc, arg_ab *a, uint32_t insn)
{
    LOG_DIS("l.muld r%d, r%d\n", a->a, a->b);
    gen_muld(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.muldu: MAC = rA * rB (unsigned, full width).  */
static bool trans_l_muldu(DisasContext *dc, arg_ab *a, uint32_t insn)
{
    LOG_DIS("l.muldu r%d, r%d\n", a->a, a->b);
    gen_muldu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}
652 
/* l.j: pc-relative jump with a delay slot; the target is latched in
   jmp_pc and delayed_branch counts down through the slot.  */
static bool trans_l_j(DisasContext *dc, arg_l_j *a, uint32_t insn)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;

    LOG_DIS("l.j %d\n", a->n);
    tcg_gen_movi_tl(jmp_pc, tmp_pc);
    dc->delayed_branch = 2;
    return true;
}

/* l.jal: as l.j, additionally writing the return address (pc + 8,
   i.e. past the delay slot) to r9.  */
static bool trans_l_jal(DisasContext *dc, arg_l_jal *a, uint32_t insn)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;

    LOG_DIS("l.jal %d\n", a->n);
    tcg_gen_movi_tl(cpu_R[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC.  */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->delayed_branch = 2;
    }
    return true;
}
677 
/* Common body for l.bf/l.bnf: select between branch target and
   fall-through (pc + 8, past the delay slot) based on SR[F], using
   COND to encode taken-on-set vs taken-on-clear.  */
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_const_tl(tmp_pc);
    TCGv t_zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

    tcg_temp_free(t_next);
    tcg_temp_free(t_true);
    tcg_temp_free(t_zero);
    dc->delayed_branch = 2;
}

/* l.bf: branch if SR[F] is set.  */
static bool trans_l_bf(DisasContext *dc, arg_l_bf *a, uint32_t insn)
{
    LOG_DIS("l.bf %d\n", a->n);
    do_bf(dc, a, TCG_COND_NE);
    return true;
}

/* l.bnf: branch if SR[F] is clear.  */
static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a, uint32_t insn)
{
    LOG_DIS("l.bnf %d\n", a->n);
    do_bf(dc, a, TCG_COND_EQ);
    return true;
}
706 
/* l.jr: indirect jump to rB, with delay slot.  */
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a, uint32_t insn)
{
    LOG_DIS("l.jr r%d\n", a->b);
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    dc->delayed_branch = 2;
    return true;
}

/* l.jalr: indirect jump to rB, writing the return address (pc + 8)
   to r9, with delay slot.  */
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a, uint32_t insn)
{
    LOG_DIS("l.jalr r%d\n", a->b);
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
    return true;
}
723 
/* l.lwa: load-linked word.  Loads rD = mem[rA + I] and records the
   address and loaded value for the matching l.swa (store-conditional).  */
static bool trans_l_lwa(DisasContext *dc, arg_load *a, uint32_t insn)
{
    TCGv ea;

    LOG_DIS("l.lwa r%d, r%d, %d\n", a->d, a->a, a->i);

    check_r0_write(a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, MO_TEUL);
    /* Record the reservation for l.swa.  */
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R[a->d]);
    tcg_temp_free(ea);
    return true;
}
739 
740 static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop)
741 {
742     TCGv ea;
743 
744     check_r0_write(a->d);
745     ea = tcg_temp_new();
746     tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
747     tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, mop);
748     tcg_temp_free(ea);
749 }
750 
/* l.lwz: load word, zero-extended.  */
static bool trans_l_lwz(DisasContext *dc, arg_load *a, uint32_t insn)
{
    LOG_DIS("l.lwz r%d, r%d, %d\n", a->d, a->a, a->i);
    do_load(dc, a, MO_TEUL);
    return true;
}

/* l.lws: load word, sign-extended.  */
static bool trans_l_lws(DisasContext *dc, arg_load *a, uint32_t insn)
{
    LOG_DIS("l.lws r%d, r%d, %d\n", a->d, a->a, a->i);
    do_load(dc, a, MO_TESL);
    return true;
}

/* l.lbz: load byte, zero-extended.  */
static bool trans_l_lbz(DisasContext *dc, arg_load *a, uint32_t insn)
{
    LOG_DIS("l.lbz r%d, r%d, %d\n", a->d, a->a, a->i);
    do_load(dc, a, MO_UB);
    return true;
}

/* l.lbs: load byte, sign-extended.  */
static bool trans_l_lbs(DisasContext *dc, arg_load *a, uint32_t insn)
{
    LOG_DIS("l.lbs r%d, r%d, %d\n", a->d, a->a, a->i);
    do_load(dc, a, MO_SB);
    return true;
}

/* l.lhz: load halfword, zero-extended.  */
static bool trans_l_lhz(DisasContext *dc, arg_load *a, uint32_t insn)
{
    LOG_DIS("l.lhz r%d, r%d, %d\n", a->d, a->a, a->i);
    do_load(dc, a, MO_TEUW);
    return true;
}

/* l.lhs: load halfword, sign-extended.  */
static bool trans_l_lhs(DisasContext *dc, arg_load *a, uint32_t insn)
{
    LOG_DIS("l.lhs r%d, r%d, %d\n", a->d, a->a, a->i);
    do_load(dc, a, MO_TESW);
    return true;
}
792 
/* l.swa: store-conditional word.  Stores rB to [rA + I] only if the
   reservation from the last l.lwa is still valid; SR[F] reports
   success.  The reservation is always cleared.  */
static bool trans_l_swa(DisasContext *dc, arg_store *a, uint32_t insn)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    LOG_DIS("l.swa r%d, r%d, %d\n", a->a, a->b, a->i);

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    /* Address mismatch with the reservation -> fail immediately.  */
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    /* Store succeeds only if memory still holds the value l.lwa saw.  */
    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[a->b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    /* Clear the reservation regardless of outcome.  */
    tcg_gen_movi_tl(cpu_lock_addr, -1);
    return true;
}
829 
830 static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop)
831 {
832     TCGv t0 = tcg_temp_new();
833     tcg_gen_addi_tl(t0, cpu_R[a->a], a->i);
834     tcg_gen_qemu_st_tl(cpu_R[a->b], t0, dc->mem_idx, mop);
835     tcg_temp_free(t0);
836 }
837 
/* l.sw: store word.  */
static bool trans_l_sw(DisasContext *dc, arg_store *a, uint32_t insn)
{
    LOG_DIS("l.sw r%d, r%d, %d\n", a->a, a->b, a->i);
    do_store(dc, a, MO_TEUL);
    return true;
}

/* l.sb: store byte.  */
static bool trans_l_sb(DisasContext *dc, arg_store *a, uint32_t insn)
{
    LOG_DIS("l.sb r%d, r%d, %d\n", a->a, a->b, a->i);
    do_store(dc, a, MO_UB);
    return true;
}

/* l.sh: store halfword.  */
static bool trans_l_sh(DisasContext *dc, arg_store *a, uint32_t insn)
{
    LOG_DIS("l.sh r%d, r%d, %d\n", a->a, a->b, a->i);
    do_store(dc, a, MO_TEUW);
    return true;
}

/* l.nop: no operation (the K field is ignored at runtime).  */
static bool trans_l_nop(DisasContext *dc, arg_l_nop *a, uint32_t insn)
{
    LOG_DIS("l.nop %d\n", a->k);
    return true;
}
864 
865 static bool trans_l_addi(DisasContext *dc, arg_rri *a, uint32_t insn)
866 {
867     TCGv t0;
868 
869     LOG_DIS("l.addi r%d, r%d, %d\n", a->d, a->a, a->i);
870     check_r0_write(a->d);
871     t0 = tcg_const_tl(a->i);
872     gen_add(dc, cpu_R[a->d], cpu_R[a->a], t0);
873     tcg_temp_free(t0);
874     return true;
875 }
876 
/* l.addic: rD = rA + immediate + SR[CY].  */
static bool trans_l_addic(DisasContext *dc, arg_rri *a, uint32_t insn)
{
    TCGv t0;

    LOG_DIS("l.addic r%d, r%d, %d\n", a->d, a->a, a->i);
    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

/* l.muli: rD = rA * immediate (signed).  */
static bool trans_l_muli(DisasContext *dc, arg_rri *a, uint32_t insn)
{
    TCGv t0;

    LOG_DIS("l.muli r%d, r%d, %d\n", a->d, a->a, a->i);
    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

/* l.maci: MAC += rA * immediate (signed).  */
static bool trans_l_maci(DisasContext *dc, arg_l_maci *a, uint32_t insn)
{
    TCGv t0;

    LOG_DIS("l.maci r%d, %d\n", a->a, a->i);
    t0 = tcg_const_tl(a->i);
    gen_mac(dc, cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}
911 
/* l.andi: rD = rA & zero-extended immediate.  */
static bool trans_l_andi(DisasContext *dc, arg_rrk *a, uint32_t insn)
{
    LOG_DIS("l.andi r%d, r%d, %d\n", a->d, a->a, a->k);
    check_r0_write(a->d);
    tcg_gen_andi_tl(cpu_R[a->d], cpu_R[a->a], a->k);
    return true;
}

/* l.ori: rD = rA | zero-extended immediate.  */
static bool trans_l_ori(DisasContext *dc, arg_rrk *a, uint32_t insn)
{
    LOG_DIS("l.ori r%d, r%d, %d\n", a->d, a->a, a->k);
    check_r0_write(a->d);
    tcg_gen_ori_tl(cpu_R[a->d], cpu_R[a->a], a->k);
    return true;
}

/* l.xori: rD = rA ^ sign-extended immediate (note arg_rri, not rrk).  */
static bool trans_l_xori(DisasContext *dc, arg_rri *a, uint32_t insn)
{
    LOG_DIS("l.xori r%d, r%d, %d\n", a->d, a->a, a->i);
    check_r0_write(a->d);
    tcg_gen_xori_tl(cpu_R[a->d], cpu_R[a->a], a->i);
    return true;
}
935 
/* l.mfspr: rD = SPR[rA | K].  Privileged: illegal in user mode.  */
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a, uint32_t insn)
{
    LOG_DIS("l.mfspr r%d, r%d, %d\n", a->d, a->a, a->k);
    check_r0_write(a->d);

#ifdef CONFIG_USER_ONLY
    gen_illegal_exception(dc);
#else
    if (dc->mem_idx == MMU_USER_IDX) {
        gen_illegal_exception(dc);
    } else {
        TCGv_i32 ti = tcg_const_i32(a->k);
        /* rD is passed in as well so unimplemented SPRs can leave it
           unchanged.  */
        gen_helper_mfspr(cpu_R[a->d], cpu_env, cpu_R[a->d], cpu_R[a->a], ti);
        tcg_temp_free_i32(ti);
    }
#endif
    return true;
}

/* l.mtspr: SPR[rA | K] = rB.  Privileged: illegal in user mode.  */
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a, uint32_t insn)
{
    LOG_DIS("l.mtspr r%d, r%d, %d\n", a->a, a->b, a->k);

#ifdef CONFIG_USER_ONLY
    gen_illegal_exception(dc);
#else
    if (dc->mem_idx == MMU_USER_IDX) {
        gen_illegal_exception(dc);
    } else {
        TCGv_i32 ti = tcg_const_i32(a->k);
        gen_helper_mtspr(cpu_env, cpu_R[a->a], cpu_R[a->b], ti);
        tcg_temp_free_i32(ti);
    }
#endif
    return true;
}
972 
/* l.mac: MAC += rA * rB (signed).  */
static bool trans_l_mac(DisasContext *dc, arg_ab *a, uint32_t insn)
{
    LOG_DIS("l.mac r%d, r%d\n", a->a, a->b);
    gen_mac(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

/* l.msb: MAC -= rA * rB (signed).  */
static bool trans_l_msb(DisasContext *dc, arg_ab *a, uint32_t insn)
{
    LOG_DIS("l.msb r%d, r%d\n", a->a, a->b);
    gen_msb(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}
986 
987 static bool trans_l_macu(DisasContext *dc, arg_ab *a, uint32_t insn)
988 {
989     LOG_DIS("l.mac r%d, r%d\n", a->a, a->b);
990     gen_macu(dc, cpu_R[a->a], cpu_R[a->b]);
991     return true;
992 }
993 
994 static bool trans_l_msbu(DisasContext *dc, arg_ab *a, uint32_t insn)
995 {
996     LOG_DIS("l.msb r%d, r%d\n", a->a, a->b);
997     gen_msbu(dc, cpu_R[a->a], cpu_R[a->b]);
998     return true;
999 }
1000 
/* l.slli: rD = rA << L.  The count is masked to the register width so
   the TCG shift is always well-defined.  */
static bool trans_l_slli(DisasContext *dc, arg_dal *a, uint32_t insn)
{
    LOG_DIS("l.slli r%d, r%d, %d\n", a->d, a->a, a->l);
    check_r0_write(a->d);
    tcg_gen_shli_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

/* l.srli: rD = rA >> L (logical), count masked.  */
static bool trans_l_srli(DisasContext *dc, arg_dal *a, uint32_t insn)
{
    LOG_DIS("l.srli r%d, r%d, %d\n", a->d, a->a, a->l);
    check_r0_write(a->d);
    tcg_gen_shri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

/* l.srai: rD = rA >> L (arithmetic), count masked.  */
static bool trans_l_srai(DisasContext *dc, arg_dal *a, uint32_t insn)
{
    LOG_DIS("l.srai r%d, r%d, %d\n", a->d, a->a, a->l);
    check_r0_write(a->d);
    tcg_gen_sari_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

/* l.rori: rD = rA rotated right by L, count masked.  */
static bool trans_l_rori(DisasContext *dc, arg_dal *a, uint32_t insn)
{
    LOG_DIS("l.rori r%d, r%d, %d\n", a->d, a->a, a->l);
    check_r0_write(a->d);
    tcg_gen_rotri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}
1032 
1033 static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a, uint32_t insn)
1034 {
1035     LOG_DIS("l.movhi r%d, %d\n", a->d, a->k);
1036     check_r0_write(a->d);
1037     tcg_gen_movi_tl(cpu_R[a->d], a->k << 16);
1038     return true;
1039 }
1040 
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a, uint32_t insn)
{
    /* Read the MAC accumulator (truncated to register width) into r[d],
       then clear the accumulator.  */
    LOG_DIS("l.macrc r%d\n", a->d);
    check_r0_write(a->d);
    tcg_gen_trunc_i64_tl(cpu_R[a->d], cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
    return true;
}
1049 
1050 static bool trans_l_sfeq(DisasContext *dc, arg_ab *a, TCGCond cond)
1051 {
1052     LOG_DIS("l.sfeq r%d, r%d\n", a->a, a->b);
1053     tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1054     return true;
1055 }
1056 
1057 static bool trans_l_sfne(DisasContext *dc, arg_ab *a, TCGCond cond)
1058 {
1059     LOG_DIS("l.sfne r%d, r%d\n", a->a, a->b);
1060     tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1061     return true;
1062 }
1063 
1064 static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a, TCGCond cond)
1065 {
1066     LOG_DIS("l.sfgtu r%d, r%d\n", a->a, a->b);
1067     tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1068     return true;
1069 }
1070 
1071 static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a, TCGCond cond)
1072 {
1073     LOG_DIS("l.sfgeu r%d, r%d\n", a->a, a->b);
1074     tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1075     return true;
1076 }
1077 
1078 static bool trans_l_sfltu(DisasContext *dc, arg_ab *a, TCGCond cond)
1079 {
1080     LOG_DIS("l.sfltu r%d, r%d\n", a->a, a->b);
1081     tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1082     return true;
1083 }
1084 
1085 static bool trans_l_sfleu(DisasContext *dc, arg_ab *a, TCGCond cond)
1086 {
1087     LOG_DIS("l.sfleu r%d, r%d\n", a->a, a->b);
1088     tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1089     return true;
1090 }
1091 
1092 static bool trans_l_sfgts(DisasContext *dc, arg_ab *a, TCGCond cond)
1093 {
1094     LOG_DIS("l.sfgts r%d, r%d\n", a->a, a->b);
1095     tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1096     return true;
1097 }
1098 
1099 static bool trans_l_sfges(DisasContext *dc, arg_ab *a, TCGCond cond)
1100 {
1101     LOG_DIS("l.sfges r%d, r%d\n", a->a, a->b);
1102     tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1103     return true;
1104 }
1105 
1106 static bool trans_l_sflts(DisasContext *dc, arg_ab *a, TCGCond cond)
1107 {
1108     LOG_DIS("l.sflts r%d, r%d\n", a->a, a->b);
1109     tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1110     return true;
1111 }
1112 
1113 static bool trans_l_sfles(DisasContext *dc, arg_ab *a, TCGCond cond)
1114 {
1115     LOG_DIS("l.sfles r%d, r%d\n", a->a, a->b);
1116     tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
1117     return true;
1118 }
1119 
1120 static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a, TCGCond cond)
1121 {
1122     LOG_DIS("l.sfeqi r%d, %d\n", a->a, a->i);
1123     tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], a->i);
1124     return true;
1125 }
1126 
1127 static bool trans_l_sfnei(DisasContext *dc, arg_ai *a, TCGCond cond)
1128 {
1129     LOG_DIS("l.sfnei r%d, %d\n", a->a, a->i);
1130     tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], a->i);
1131     return true;
1132 }
1133 
1134 static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a, TCGCond cond)
1135 {
1136     LOG_DIS("l.sfgtui r%d, %d\n", a->a, a->i);
1137     tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], a->i);
1138     return true;
1139 }
1140 
1141 static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a, TCGCond cond)
1142 {
1143     LOG_DIS("l.sfgeui r%d, %d\n", a->a, a->i);
1144     tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], a->i);
1145     return true;
1146 }
1147 
1148 static bool trans_l_sfltui(DisasContext *dc, arg_ai *a, TCGCond cond)
1149 {
1150     LOG_DIS("l.sfltui r%d, %d\n", a->a, a->i);
1151     tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], a->i);
1152     return true;
1153 }
1154 
1155 static bool trans_l_sfleui(DisasContext *dc, arg_ai *a, TCGCond cond)
1156 {
1157     LOG_DIS("l.sfleui r%d, %d\n", a->a, a->i);
1158     tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], a->i);
1159     return true;
1160 }
1161 
1162 static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a, TCGCond cond)
1163 {
1164     LOG_DIS("l.sfgtsi r%d, %d\n", a->a, a->i);
1165     tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], a->i);
1166     return true;
1167 }
1168 
1169 static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a, TCGCond cond)
1170 {
1171     LOG_DIS("l.sfgesi r%d, %d\n", a->a, a->i);
1172     tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], a->i);
1173     return true;
1174 }
1175 
1176 static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a, TCGCond cond)
1177 {
1178     LOG_DIS("l.sfltsi r%d, %d\n", a->a, a->i);
1179     tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], a->i);
1180     return true;
1181 }
1182 
1183 static bool trans_l_sflesi(DisasContext *dc, arg_ai *a, TCGCond cond)
1184 {
1185     LOG_DIS("l.sflesi r%d, %d\n", a->a, a->i);
1186     tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], a->i);
1187     return true;
1188 }
1189 
1190 static bool trans_l_sys(DisasContext *dc, arg_l_sys *a, uint32_t insn)
1191 {
1192     LOG_DIS("l.sys %d\n", a->k);
1193     tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
1194     gen_exception(dc, EXCP_SYSCALL);
1195     dc->base.is_jmp = DISAS_NORETURN;
1196     return true;
1197 }
1198 
1199 static bool trans_l_trap(DisasContext *dc, arg_l_trap *a, uint32_t insn)
1200 {
1201     LOG_DIS("l.trap %d\n", a->k);
1202     tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
1203     gen_exception(dc, EXCP_TRAP);
1204     dc->base.is_jmp = DISAS_NORETURN;
1205     return true;
1206 }
1207 
static bool trans_l_msync(DisasContext *dc, arg_l_msync *a, uint32_t insn)
{
    /* Memory synchronization: emit a full TCG memory barrier.  */
    LOG_DIS("l.msync\n");
    tcg_gen_mb(TCG_MO_ALL);
    return true;
}
1214 
static bool trans_l_psync(DisasContext *dc, arg_l_psync *a, uint32_t insn)
{
    /* Pipeline synchronization: nothing to do beyond logging, as no
       pipeline state is modeled here.  */
    LOG_DIS("l.psync\n");
    return true;
}
1220 
static bool trans_l_csync(DisasContext *dc, arg_l_csync *a, uint32_t insn)
{
    /* Context synchronization: nothing to do beyond logging here.  */
    LOG_DIS("l.csync\n");
    return true;
}
1226 
1227 static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a, uint32_t insn)
1228 {
1229     LOG_DIS("l.rfe\n");
1230 
1231 #ifdef CONFIG_USER_ONLY
1232     gen_illegal_exception(dc);
1233 #else
1234     if (dc->mem_idx == MMU_USER_IDX) {
1235         gen_illegal_exception(dc);
1236     } else {
1237         gen_helper_rfe(cpu_env);
1238         dc->base.is_jmp = DISAS_UPDATE;
1239     }
1240 #endif
1241     return true;
1242 }
1243 
1244 static void do_fp2(DisasContext *dc, arg_da *a,
1245                    void (*fn)(TCGv, TCGv_env, TCGv))
1246 {
1247     check_r0_write(a->d);
1248     fn(cpu_R[a->d], cpu_env, cpu_R[a->a]);
1249     gen_helper_update_fpcsr(cpu_env);
1250 }
1251 
1252 static void do_fp3(DisasContext *dc, arg_dab *a,
1253                    void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
1254 {
1255     check_r0_write(a->d);
1256     fn(cpu_R[a->d], cpu_env, cpu_R[a->a], cpu_R[a->b]);
1257     gen_helper_update_fpcsr(cpu_env);
1258 }
1259 
1260 static void do_fpcmp(DisasContext *dc, arg_ab *a,
1261                      void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
1262                      bool inv, bool swap)
1263 {
1264     if (swap) {
1265         fn(cpu_sr_f, cpu_env, cpu_R[a->b], cpu_R[a->a]);
1266     } else {
1267         fn(cpu_sr_f, cpu_env, cpu_R[a->a], cpu_R[a->b]);
1268     }
1269     if (inv) {
1270         tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
1271     }
1272     gen_helper_update_fpcsr(cpu_env);
1273 }
1274 
1275 static bool trans_lf_add_s(DisasContext *dc, arg_dab *a, uint32_t insn)
1276 {
1277     LOG_DIS("lf.add.s r%d, r%d, r%d\n", a->d, a->a, a->b);
1278     do_fp3(dc, a, gen_helper_float_add_s);
1279     return true;
1280 }
1281 
1282 static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a, uint32_t insn)
1283 {
1284     LOG_DIS("lf.sub.s r%d, r%d, r%d\n", a->d, a->a, a->b);
1285     do_fp3(dc, a, gen_helper_float_sub_s);
1286     return true;
1287 }
1288 
1289 static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a, uint32_t insn)
1290 {
1291     LOG_DIS("lf.mul.s r%d, r%d, r%d\n", a->d, a->a, a->b);
1292     do_fp3(dc, a, gen_helper_float_mul_s);
1293     return true;
1294 }
1295 
1296 static bool trans_lf_div_s(DisasContext *dc, arg_dab *a, uint32_t insn)
1297 {
1298     LOG_DIS("lf.div.s r%d, r%d, r%d\n", a->d, a->a, a->b);
1299     do_fp3(dc, a, gen_helper_float_div_s);
1300     return true;
1301 }
1302 
1303 static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a, uint32_t insn)
1304 {
1305     LOG_DIS("lf.rem.s r%d, r%d, r%d\n", a->d, a->a, a->b);
1306     do_fp3(dc, a, gen_helper_float_rem_s);
1307     return true;
1308 }
1309 
1310 static bool trans_lf_itof_s(DisasContext *dc, arg_da *a, uint32_t insn)
1311 {
1312     LOG_DIS("lf.itof.s r%d, r%d\n", a->d, a->a);
1313     do_fp2(dc, a, gen_helper_itofs);
1314     return true;
1315 }
1316 
1317 static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a, uint32_t insn)
1318 {
1319     LOG_DIS("lf.ftoi.s r%d, r%d\n", a->d, a->a);
1320     do_fp2(dc, a, gen_helper_ftois);
1321     return true;
1322 }
1323 
1324 static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a, uint32_t insn)
1325 {
1326     LOG_DIS("lf.madd.s r%d, r%d, r%d\n", a->d, a->a, a->b);
1327     check_r0_write(a->d);
1328     gen_helper_float_madd_s(cpu_R[a->d], cpu_env, cpu_R[a->d],
1329                             cpu_R[a->a], cpu_R[a->b]);
1330     gen_helper_update_fpcsr(cpu_env);
1331     return true;
1332 }
1333 
1334 static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a, uint32_t insn)
1335 {
1336     LOG_DIS("lf.sfeq.s r%d, r%d\n", a->a, a->b);
1337     do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
1338     return true;
1339 }
1340 
1341 static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a, uint32_t insn)
1342 {
1343     LOG_DIS("lf.sfne.s r%d, r%d\n", a->a, a->b);
1344     do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
1345     return true;
1346 }
1347 
1348 static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a, uint32_t insn)
1349 {
1350     LOG_DIS("lf.sfgt.s r%d, r%d\n", a->a, a->b);
1351     do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
1352     return true;
1353 }
1354 
1355 static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a, uint32_t insn)
1356 {
1357     LOG_DIS("lf.sfge.s r%d, r%d\n", a->a, a->b);
1358     do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
1359     return true;
1360 }
1361 
1362 static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a, uint32_t insn)
1363 {
1364     LOG_DIS("lf.sflt.s r%d, r%d\n", a->a, a->b);
1365     do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
1366     return true;
1367 }
1368 
1369 static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a, uint32_t insn)
1370 {
1371     LOG_DIS("lf.sfle.s r%d, r%d\n", a->a, a->b);
1372     do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
1373     return true;
1374 }
1375 
1376 static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
1377 {
1378     DisasContext *dc = container_of(dcb, DisasContext, base);
1379     CPUOpenRISCState *env = cs->env_ptr;
1380     int bound;
1381 
1382     dc->mem_idx = cpu_mmu_index(env, false);
1383     dc->tb_flags = dc->base.tb->flags;
1384     dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
1385     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
1386     dc->base.max_insns = MIN(dc->base.max_insns, bound);
1387 }
1388 
1389 static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
1390 {
1391     DisasContext *dc = container_of(db, DisasContext, base);
1392 
1393     /* Allow the TCG optimizer to see that R0 == 0,
1394        when it's true, which is the common case.  */
1395     if (dc->tb_flags & TB_FLAGS_R0_0) {
1396         cpu_R[0] = tcg_const_tl(0);
1397     } else {
1398         cpu_R[0] = cpu_R0;
1399     }
1400 }
1401 
1402 static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
1403 {
1404     DisasContext *dc = container_of(dcbase, DisasContext, base);
1405 
1406     tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
1407                        | (dc->base.num_insns > 1 ? 2 : 0));
1408 }
1409 
static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Raise a debug exception at the breakpointed insn and end the TB.  */
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    dc->base.pc_next += 4;
    return true;
}
1425 
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);

    /* Decode one 32-bit insn; unrecognized encodings raise the
       illegal-instruction exception.  */
    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    }
    dc->base.pc_next += 4;

    /* delay slot */
    if (dc->delayed_branch) {
        dc->delayed_branch--;
        if (!dc->delayed_branch) {
            /* The delay slot just completed: commit the saved branch
               target (jmp_pc) to pc and end the TB.  */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            dc->base.is_jmp = DISAS_UPDATE;
            return;
        }
    }
}
1448 
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* If the delay-slot flag changed from the value recorded in the TB
       flags at entry, write the new value back to the CPU state.  */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    /* ppc tracks the previous pc, i.e. the last insn translated.  */
    tcg_gen_movi_tl(cpu_ppc, dc->base.pc_next - 4);
    if (dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    }
    if (unlikely(dc->base.singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_TOO_MANY:
            /* Ran out of insn budget: chain directly to the next TB.  */
            gen_goto_tb(dc, 0, dc->base.pc_next);
            break;
        case DISAS_NORETURN:
        case DISAS_JUMP:
        case DISAS_TB_JUMP:
            break;
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1483 
1484 static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
1485 {
1486     DisasContext *s = container_of(dcbase, DisasContext, base);
1487 
1488     qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
1489     log_target_disas(cs, s->base.pc_first, s->base.tb->size);
1490 }
1491 
/* Hooks consumed by the generic translator_loop().  */
static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .breakpoint_check   = openrisc_tr_breakpoint_check,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};
1501 
1502 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
1503 {
1504     DisasContext ctx;
1505 
1506     translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
1507 }
1508 
1509 void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
1510                              fprintf_function cpu_fprintf,
1511                              int flags)
1512 {
1513     OpenRISCCPU *cpu = OPENRISC_CPU(cs);
1514     CPUOpenRISCState *env = &cpu->env;
1515     int i;
1516 
1517     cpu_fprintf(f, "PC=%08x\n", env->pc);
1518     for (i = 0; i < 32; ++i) {
1519         cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
1520                     (i % 4) == 3 ? '\n' : ' ');
1521     }
1522 }
1523 
1524 void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
1525                           target_ulong *data)
1526 {
1527     env->pc = data[0];
1528     env->dflag = data[1] & 1;
1529     if (data[1] & 2) {
1530         env->ppc = env->pc - 4;
1531     }
1532 }
1533