xref: /openbmc/qemu/target/sparc/translate.c (revision 287b11520bd4dafd58e42ccff7010f8c4bbafcf9)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 
29 #include "exec/helper-gen.h"
30 
31 #include "exec/translator.h"
32 #include "exec/log.h"
33 #include "asi.h"
34 
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef  HELPER_H
38 
39 #ifdef TARGET_SPARC64
40 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
49 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
50 # define gen_helper_restored(E)                 qemu_build_not_reached()
51 # define gen_helper_retry(E)                    qemu_build_not_reached()
52 # define gen_helper_saved(E)                    qemu_build_not_reached()
53 # define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
59 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
61 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
62 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
63 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
64 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
65 # define MAXTL_MASK                             0
66 #endif
67 
68 /* Dynamic PC, must exit to main loop. */
69 #define DYNAMIC_PC         1
70 /* Dynamic PC, one of two values according to jump_pc[T2]. */
71 #define JUMP_PC            2
72 /* Dynamic PC, may lookup next TB. */
73 #define DYNAMIC_PC_LOOKUP  3
74 
75 #define DISAS_EXIT  DISAS_TARGET_0
76 
77 /* global register indexes */
78 static TCGv_ptr cpu_regwptr;
79 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
80 static TCGv_i32 cpu_cc_op;
81 static TCGv_i32 cpu_psr;
82 static TCGv cpu_fsr, cpu_pc, cpu_npc;
83 static TCGv cpu_regs[32];
84 static TCGv cpu_y;
85 static TCGv cpu_tbr;
86 static TCGv cpu_cond;
87 #ifdef TARGET_SPARC64
88 static TCGv_i32 cpu_xcc, cpu_fprs;
89 static TCGv cpu_gsr;
90 #else
91 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
92 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
93 #endif
94 /* Floating point registers */
95 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
96 
97 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
98 #ifdef TARGET_SPARC64
99 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
100 # define env64_field_offsetof(X)  env_field_offsetof(X)
101 #else
102 # define env32_field_offsetof(X)  env_field_offsetof(X)
103 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
104 #endif
105 
106 typedef struct DisasDelayException {
107     struct DisasDelayException *next;
108     TCGLabel *lab;
109     TCGv_i32 excp;
110     /* Saved state at parent insn. */
111     target_ulong pc;
112     target_ulong npc;
113 } DisasDelayException;
114 
115 typedef struct DisasContext {
116     DisasContextBase base;
117     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
118     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
119     target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
120     int mem_idx;
121     bool fpu_enabled;
122     bool address_mask_32bit;
123 #ifndef CONFIG_USER_ONLY
124     bool supervisor;
125 #ifdef TARGET_SPARC64
126     bool hypervisor;
127 #endif
128 #endif
129 
130     uint32_t cc_op;  /* current CC operation */
131     sparc_def_t *def;
132 #ifdef TARGET_SPARC64
133     int fprs_dirty;
134     int asi;
135 #endif
136     DisasDelayException *delay_excp_list;
137 } DisasContext;
138 
139 typedef struct {
140     TCGCond cond;
141     bool is_bool;
142     TCGv c1, c2;
143 } DisasCompare;
144 
145 // This function uses non-native bit order
146 #define GET_FIELD(X, FROM, TO)                                  \
147     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
148 
149 // This function uses the order in the manuals, i.e. bit 0 is 2^0
150 #define GET_FIELD_SP(X, FROM, TO)               \
151     GET_FIELD(X, 31 - (TO), 31 - (FROM))
152 
153 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
154 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
155 
156 #ifdef TARGET_SPARC64
157 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
158 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
159 #else
160 #define DFPREG(r) (r & 0x1e)
161 #define QFPREG(r) (r & 0x1c)
162 #endif
163 
164 #define UA2005_HTRAP_MASK 0xff
165 #define V8_TRAP_MASK 0x7f
166 
/*
 * Sign-extend the low LEN bits of X (1 <= LEN <= 32).
 *
 * The previous "(x << len) >> len" left-shifted a possibly-negative
 * signed int (undefined behavior, C99 6.5.7) and relied on
 * implementation-defined arithmetic right shift.  Do the widening in
 * unsigned/int64_t arithmetic instead, which is fully defined and
 * produces the identical two's-complement result.
 */
static int sign_extend(int x, int len)
{
    uint32_t mask = len >= 32 ? UINT32_MAX : (1u << len) - 1;
    uint32_t u = (uint32_t)x & mask;
    int64_t v = u;

    /* If the field's sign bit is set, subtract 2^len to make it negative. */
    if (u & (1u << (len - 1))) {
        v -= (int64_t)1 << len;
    }
    return (int)v;
}
172 
173 #define IS_IMM (insn & (1<<13))
174 
/*
 * Mark the FPRS half containing single-precision register RD as dirty
 * (sparc64 only; no-op elsewhere).  Bit 1 covers f0..f31, bit 2 covers
 * f32..f63.  dc->fprs_dirty caches bits already set within this TB so we
 * don't emit a redundant OR into cpu_fprs.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
187 
188 /* floating point registers moves */
189 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
190 {
191     TCGv_i32 ret = tcg_temp_new_i32();
192     if (src & 1) {
193         tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
194     } else {
195         tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
196     }
197     return ret;
198 }
199 
/*
 * Store V into single-precision register DST.  The deposit overwrites
 * only the half of the i64 pair that holds DST: odd registers live at
 * bit offset 0, even registers at bit offset 32.  Also marks FPRS dirty.
 */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}
209 
/* Return a fresh i32 temporary to hold a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
214 
215 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
216 {
217     src = DFPREG(src);
218     return cpu_fpr[src / 2];
219 }
220 
221 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
222 {
223     dst = DFPREG(dst);
224     tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
225     gen_update_fprs_dirty(dc, dst);
226 }
227 
/* Return the i64 global that will receive double-precision result DST. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
232 
/* Copy quad register pair SRC into the env scratch slot qt0. */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
240 
/* Copy quad register pair SRC into the env scratch slot qt1. */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
248 
/* Copy the env scratch slot qt0 back into quad register pair DST. */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
256 
257 #ifdef TARGET_SPARC64
/* Move quad register RS to RD (two i64 copies) and mark FPRS dirty. */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
267 #endif
268 
269 /* moves */
270 #ifdef CONFIG_USER_ONLY
271 #define supervisor(dc) 0
272 #define hypervisor(dc) 0
273 #else
274 #ifdef TARGET_SPARC64
275 #define hypervisor(dc) (dc->hypervisor)
276 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
277 #else
278 #define supervisor(dc) (dc->supervisor)
279 #define hypervisor(dc) 0
280 #endif
281 #endif
282 
283 #if !defined(TARGET_SPARC64)
284 # define AM_CHECK(dc)  false
285 #elif defined(TARGET_ABI32)
286 # define AM_CHECK(dc)  true
287 #elif defined(CONFIG_USER_ONLY)
288 # define AM_CHECK(dc)  false
289 #else
290 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
291 #endif
292 
293 static void gen_address_mask(DisasContext *dc, TCGv addr)
294 {
295     if (AM_CHECK(dc)) {
296         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
297     }
298 }
299 
300 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
301 {
302     return AM_CHECK(dc) ? (uint32_t)addr : addr;
303 }
304 
305 static TCGv gen_load_gpr(DisasContext *dc, int reg)
306 {
307     if (reg > 0) {
308         assert(reg < 32);
309         return cpu_regs[reg];
310     } else {
311         TCGv t = tcg_temp_new();
312         tcg_gen_movi_tl(t, 0);
313         return t;
314     }
315 }
316 
317 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
318 {
319     if (reg > 0) {
320         assert(reg < 32);
321         tcg_gen_mov_tl(cpu_regs[reg], v);
322     }
323 }
324 
325 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
326 {
327     if (reg > 0) {
328         assert(reg < 32);
329         return cpu_regs[reg];
330     } else {
331         return tcg_temp_new();
332     }
333 }
334 
335 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
336 {
337     return translator_use_goto_tb(&s->base, pc) &&
338            translator_use_goto_tb(&s->base, npc);
339 }
340 
/*
 * End the TB, transferring control to (PC, NPC).  Uses a chained direct
 * jump (goto_tb/exit_tb slot TB_NUM) when allowed, otherwise falls back
 * to a TB-pointer lookup via lookup_and_goto_ptr.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
357 
358 // XXX suboptimal
/* Extract the PSR negative (N) flag from SRC into REG as 0/1. */
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}
364 
/* Extract the PSR zero (Z) flag from SRC into REG as 0/1. */
static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}
370 
/* Extract the PSR overflow (V) flag from SRC into REG as 0/1. */
static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}
376 
/* Extract the PSR carry (C) flag from SRC into REG as 0/1. */
static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
382 
/*
 * DST = SRC1 + SRC2, latching operands and result into cc_src/cc_src2/
 * cc_dst so the flags can be computed lazily later.
 */
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
390 
/*
 * Recover the 32-bit carry left behind by a previous lazily-flagged add:
 * carry = (uint32_t)cc_dst < (uint32_t)cc_src.  On 64-bit targets the
 * low halves are extracted first; on 32-bit targets the globals are
 * already i32 and are compared directly.
 */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
411 
/*
 * Recover the 32-bit borrow left behind by a previous lazily-flagged
 * subtract: carry = (uint32_t)cc_src < (uint32_t)cc_src2.
 */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
432 
/*
 * DST = SRC1 + SRC2 + CARRY_32 (ADDC/ADDCcc core).  When UPDATE_CC the
 * operands are latched into cc_src/cc_src2; DST is then required to be
 * cpu_cc_dst so the flags can be recomputed lazily.
 *
 * NOTE(review): the 32-bit branch passes TCGv values to tcg_gen_add_i32,
 * which relies on TCGv == TCGv_i32 when TARGET_LONG_BITS == 32; the
 * #ifdef TARGET_SPARC64 here pairs with that assumption — confirm against
 * the subc counterpart which tests TARGET_LONG_BITS instead.
 */
static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
452 
/*
 * ADDC variant used when the carry comes from a previous ADD.  On 32-bit
 * targets the carry is regenerated for free by redoing the previous add
 * with an ADD2 and discarding the low word; on 64-bit targets fall back
 * to the explicit 32-bit carry extraction.
 */
static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
477 
/* ADDC with carry from a previous add; flags not updated. */
static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}
482 
/* ADDCcc with carry from a previous add; latches flags operands. */
static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}
487 
/* ADDC with carry (borrow) from a previous subtract; flags not updated. */
static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}
492 
/* ADDCcc with carry (borrow) from a previous subtract. */
static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}
497 
/* ADDC where the previous CC state is unknown: ask the helper for C. */
static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}
505 
/* Generic ADDC (helper-computed carry); flags not updated. */
static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}
510 
/* Generic ADDCcc (helper-computed carry); latches flags operands. */
static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}
515 
/*
 * DST = SRC1 - SRC2, latching operands and result into cc_src/cc_src2/
 * cc_dst for lazy flag computation.
 */
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
523 
/*
 * DST = SRC1 - SRC2 - CARRY_32 (SUBC/SUBCcc core).  When UPDATE_CC the
 * operands are latched; DST must then be cpu_cc_dst.
 */
static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
545 
/* SUBC with carry from a previous add; flags not updated. */
static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}
550 
/* SUBCcc with carry from a previous add; latches flags operands. */
static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}
555 
/*
 * SUBC variant used when the borrow comes from a previous SUB.  On
 * 32-bit targets the borrow is regenerated via a SUB2, discarding the
 * low word; on 64-bit targets use the explicit 32-bit borrow extraction.
 */
static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
578 
/* SUBC with borrow from a previous subtract; flags not updated. */
static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}
583 
/* SUBCcc with borrow from a previous subtract; latches flags operands. */
static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}
588 
/* SUBC where the previous CC state is unknown: ask the helper for C. */
static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}
597 
/* Generic SUBC (helper-computed borrow); flags not updated. */
static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}
602 
/* Generic SUBCcc (helper-computed borrow); latches flags operands. */
static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}
607 
/*
 * MULScc: one step of the V8 multiply-step algorithm.
 *   - cc_src2 = (Y & 1) ? src2 : 0   (multiplier bit selects addend)
 *   - Y       = (src1 & 1) << 31 | Y >> 1
 *   - cc_src  = (N ^ V) << 31 | src1 >> 1
 *   - dst     = cc_dst = cc_src + cc_src2 (flags latched for lazy eval)
 * The statement order matters: Y must be updated from the *original*
 * cc_src before cc_src itself is shifted.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
646 
/*
 * 32x32 -> 64 multiply (UMUL/SMUL).  The low 32 bits of the product go
 * to DST (full 64-bit product on 64-bit targets) and the high 32 bits
 * are written to %y.  SIGN_EXT selects signed vs unsigned extension of
 * the truncated operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
671 
/* UMUL: unsigned 32x32 multiply; high half goes to %y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
677 
/* SMUL: signed 32x32 multiply; high half goes to %y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
683 
/* UDIVX (v9): delegate 64-bit unsigned divide to the helper. */
static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}
688 
/* SDIVX (v9): delegate 64-bit signed divide to the helper. */
static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}
693 
/* UDIV: delegate 64/32 unsigned divide (uses %y) to the helper. */
static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}
698 
/* SDIV: delegate 64/32 signed divide (uses %y) to the helper. */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}
703 
/* UDIVcc: helper divide that also updates the condition codes. */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}
708 
/* SDIVcc: helper divide that also updates the condition codes. */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}
713 
/* TADDccTV: tagged add, trapping on overflow, via helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
718 
/* TSUBccTV: tagged subtract, trapping on overflow, via helper. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
723 
/*
 * POPC: population count of SRC2.  SRC1 is intentionally unused; the
 * three-operand signature only matches the common dispatch shape.
 */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
728 
729 // 1
/* Branch always: condition value is constant 1. */
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
734 
735 // Z
/* be: taken when Z is set. */
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}
740 
741 // Z | (N ^ V)
/* ble: dst = Z | (N ^ V). */
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}
751 
752 // N ^ V
/* bl: dst = N ^ V. */
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}
760 
761 // C | Z
/* bleu: dst = C | Z. */
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}
769 
770 // C
/* bcs: taken when C is set. */
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}
775 
776 // V
/* bvs: taken when V is set. */
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}
781 
782 // 0
/* Branch never: condition value is constant 0. */
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
787 
788 // N
/* bneg: taken when N is set. */
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}
793 
794 // !Z
/* bne: dst = !Z (invert the 0/1 flag with xor 1). */
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
800 
801 // !(Z | (N ^ V))
/* bg: dst = !(Z | (N ^ V)) — complement of ble. */
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
807 
808 // !(N ^ V)
/* bge: dst = !(N ^ V) — complement of bl. */
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
814 
815 // !(C | Z)
/* bgu: dst = !(C | Z) — complement of bleu. */
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
821 
822 // !C
/* bcc: dst = !C. */
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
828 
829 // !N
/* bpos: dst = !N. */
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
835 
836 // !V
/* bvc: dst = !V. */
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
842 
843 /*
844   FPSR bit field FCC1 | FCC0:
845    0 =
846    1 <
847    2 >
848    3 unordered
849 */
/* Extract FCC0 of the FCC field at FCC_OFFSET from FSR value SRC (0/1). */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
856 
/* Extract FCC1 of the FCC field at FCC_OFFSET from FSR value SRC (0/1). */
static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
862 
863 // !0: FCC0 | FCC1
/* fbne (FCC != 0): dst = FCC0 | FCC1. */
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}
871 
872 // 1 or 2: FCC0 ^ FCC1
/* fblg (FCC == 1 or 2): dst = FCC0 ^ FCC1. */
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}
880 
881 // 1 or 3: FCC0
/* fbul (FCC == 1 or 3): dst = FCC0. */
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
886 
887 // 1: FCC0 & !FCC1
/* fbl (FCC == 1): dst = FCC0 & !FCC1. */
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}
895 
896 // 2 or 3: FCC1
/* fbug (FCC == 2 or 3): dst = FCC1. */
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
901 
902 // 2: !FCC0 & FCC1
/* fbg (FCC == 2): dst = !FCC0 & FCC1 (note andc operand order). */
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}
910 
911 // 3: FCC0 & FCC1
/* fbu (FCC == 3, unordered): dst = FCC0 & FCC1. */
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}
919 
920 // 0: !(FCC0 | FCC1)
/* fbe (FCC == 0): dst = !(FCC0 | FCC1). */
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
929 
930 // 0 or 3: !(FCC0 ^ FCC1)
/* fbue (FCC == 0 or 3): dst = !(FCC0 ^ FCC1). */
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
939 
940 // 0 or 2: !FCC0
/* fbge (FCC == 0 or 2): dst = !FCC0. */
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
946 
947 // !1: !(FCC0 & !FCC1)
/* fbuge (FCC != 1): dst = !(FCC0 & !FCC1). */
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
956 
957 // 0 or 1: !FCC1
/* fble (FCC == 0 or 1): dst = !FCC1. */
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
963 
964 // !2: !(!FCC0 & FCC1)
/* fbule (FCC != 2): dst = !(!FCC0 & FCC1). */
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
973 
974 // !3: !(FCC0 & FCC1)
/* fbo (FCC != 3, ordered): dst = !(FCC0 & FCC1). */
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
983 
/*
 * Emit both exits of a resolved conditional branch: when R_COND is
 * non-zero fall through to PC1 (goto_tb slot 0); when it is zero jump
 * to PC2 (goto_tb slot 1).  Each target's npc is its pc + 4.
 */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
996 
997 static void gen_generic_branch(DisasContext *dc)
998 {
999     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
1000     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
1001     TCGv zero = tcg_constant_tl(0);
1002 
1003     tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1004 }
1005 
1006 /* call this function before using the condition register as it may
1007    have been set for a jump */
/*
 * Call this function before using the condition register as it may have
 * been set for a jump: a pending JUMP_PC npc is resolved into cpu_npc
 * via movcond, after which npc becomes DYNAMIC_PC_LOOKUP.
 */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
1015 
/*
 * Make cpu_npc valid.  Values with low bits set (DYNAMIC_PC, JUMP_PC,
 * DYNAMIC_PC_LOOKUP) are symbolic markers, not addresses: JUMP_PC still
 * needs the movcond emitted, the DYNAMIC_* cases already have cpu_npc
 * up to date.  A real (aligned) npc is stored as an immediate.
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1034 
1035 static void update_psr(DisasContext *dc)
1036 {
1037     if (dc->cc_op != CC_OP_FLAGS) {
1038         dc->cc_op = CC_OP_FLAGS;
1039         gen_helper_compute_psr(tcg_env);
1040     }
1041 }
1042 
/* Sync both cpu_pc and cpu_npc with the translator's view. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1048 
/* Raise exception WHICH at the current insn; ends the TB (noreturn). */
static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1055 
1056 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1057 {
1058     DisasDelayException *e = g_new0(DisasDelayException, 1);
1059 
1060     e->next = dc->delay_excp_list;
1061     dc->delay_excp_list = e;
1062 
1063     e->lab = gen_new_label();
1064     e->excp = excp;
1065     e->pc = dc->pc;
1066     /* Caller must have used flush_cond before branch. */
1067     assert(e->npc != JUMP_PC);
1068     e->npc = dc->npc;
1069 
1070     return e->lab;
1071 }
1072 
/* Convenience wrapper: delayed exception from a constant trap number.  */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1077 
1078 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1079 {
1080     TCGv t = tcg_temp_new();
1081     TCGLabel *lab;
1082 
1083     tcg_gen_andi_tl(t, addr, mask);
1084 
1085     flush_cond(dc);
1086     lab = delay_exception(dc, TT_UNALIGNED);
1087     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1088 }
1089 
/*
 * Advance pc to npc (as for a delay-slot transfer), handling symbolic
 * npc values: a pending JUMP_PC is resolved first, dynamic values copy
 * cpu_npc into cpu_pc, and constants are tracked at translation time.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the deferred branch, then copy the result.  */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both values known at translate time; no code emitted.  */
        dc->pc = dc->npc;
    }
}
1111 
/* Sequential advance: pc = npc, npc += 4.  */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1117 
/*
 * Fill in CMP with a comparison equivalent to integer condition COND
 * (branch/trap encoding, 0..15) evaluated against %xcc (xcc=true) or
 * %icc.  Uses the lazily-tracked cc state (dc->cc_op) to produce a
 * direct TCG comparison when possible; otherwise materializes the full
 * PSR flags and evaluates a boolean.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* After SUBcc: conditions expressible as a direct TCG compare of
       cc_src against cc_src2.  -1 entries need separate handling.  */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* After a logic op (C = V = 0): conditions reduced to a compare of
       cc_dst against zero.  */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* For %icc, compare only the low 32 bits.  */
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            /* Sign of the result: compare cc_dst against zero.  */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            /* V cannot be expressed as a single compare here.  */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        /* Materialize the real flags, then fall through to evaluate.  */
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
1285 
/*
 * Fill in CMP with a comparison equivalent to floating-point condition
 * COND evaluated against %fccN (N = CC).  Always produces a boolean.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    /* Bit offset of the selected fcc field relative to fcc0; the
       "32 - 10" form suggests fcc1..3 live at FSR bits 32/34/36 while
       fcc0 is at bit 10 — confirm against the gen_op_eval_fb* helpers.  */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1364 
/*
 * Register-based conditions (compare a register against zero).  The
 * table stores the *inverse* of each condition; gen_compare_reg below
 * applies tcg_invert_cond to recover the real one.
 */
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1376 
/* Fill in CMP for register condition COND: compare R_SRC against zero.  */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}
1384 
#ifdef TARGET_SPARC64
/* Single-precision compare, dispatched to the helper for %fccN.  */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Double-precision compare, dispatched to the helper for %fccN.  */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Quad-precision compare; operands implicit in env (QT0/QT1).  */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

/* Single-precision compare-with-exception ("fcmpes") for %fccN.  */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Double-precision compare-with-exception ("fcmped") for %fccN.  */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Quad-precision compare-with-exception; operands implicit in env.  */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1493 
1494 #else
1495 
/* Pre-v9 has a single fcc field, so fccno is ignored in all of these.  */
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
1525 #endif
1526 
/* Set the FSR trap-type field to FSR_FLAGS and raise TT_FP_EXCP.  */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1533 
/*
 * If the FPU is disabled, raise TT_NFPU_INSN and return 1 so the caller
 * can abandon the insn.  User-only always has the FPU enabled.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1544 
/* Clear the FSR trap-type and current-exception fields.  */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1549 
/* frd = gen(env, frs) for 32-bit fp ops; checks IEEE exceptions.  */
static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}
1563 
/* frd = gen(frs) for 32-bit ops that cannot raise fp exceptions.  */
static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}
1576 
/* frd = gen(env, frs1, frs2) for 32-bit fp ops; checks IEEE exceptions.  */
static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}
1591 
#ifdef TARGET_SPARC64
/* frd = gen(frs1, frs2) for 32-bit ops without fp exceptions (VIS).  */
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif
1607 
/* frd = gen(env, frs) for 64-bit fp ops; checks IEEE exceptions.  */
static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1621 
#ifdef TARGET_SPARC64
/* frd = gen(frs) for 64-bit ops without fp exceptions.  */
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
1636 
/* frd = gen(env, frs1, frs2) for 64-bit fp ops; checks IEEE exceptions.  */
static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1651 
#ifdef TARGET_SPARC64
/* frd = gen(frs1, frs2) for 64-bit ops without fp exceptions (VIS).  */
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* As gen_ne_fop_DDD, but the op also reads %gsr (VIS align/merge ops).  */
static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* As gen_ne_fop_DDD, but the op also reads the old value of frd.  */
static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
1696 
/* Quad op via env temporaries: QT0 = gen(env) with QT1 = frs.  */
static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1708 
#ifdef TARGET_SPARC64
/* Quad op via env temporaries, without the IEEE exception check.  */
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif
1721 
/* Two-operand quad op: QT0 = gen(env) with QT0 = frs1, QT1 = frs2.  */
static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1734 
/* 64-bit frd = gen(env, 32-bit frs1, 32-bit frs2); checks IEEE excp.  */
static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1750 
/* Quad frd (via QT0) = gen(env, 64-bit frs1, frs2); checks IEEE excp.  */
static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1765 
#ifdef TARGET_SPARC64
/* 64-bit frd = gen(env, 32-bit frs); checks IEEE exceptions.  */
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
1782 
/* 64-bit frd = gen(env, 32-bit frs), without the IEEE exception check.  */
static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1796 
/* 32-bit frd = gen(env, 64-bit frs); checks IEEE exceptions.  */
static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}
1811 
/* 32-bit frd = gen(env) with quad source in QT1; checks IEEE excp.  */
static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}
1825 
/* 64-bit frd = gen(env) with quad source in QT1; checks IEEE excp.  */
static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1839 
/* Quad frd (via QT0) = gen(env, 32-bit frs), no IEEE exception check.  */
static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1852 
/* Quad frd (via QT0) = gen(env, 64-bit frs), no IEEE exception check.  */
static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1865 
/* asi moves */

/* How an ASI-qualified access is implemented; decided by resolve_asi.  */
typedef enum {
    GET_ASI_HELPER,  /* fall back to the out-of-line ld/st_asi helper */
    GET_ASI_EXCP,    /* an exception was already generated; emit nothing */
    GET_ASI_DIRECT,  /* plain inline load/store with a chosen mem_idx */
    GET_ASI_DTWINX,  /* twinx/quad-ldd asis (see resolve_asi mapping) */
    GET_ASI_BLOCK,   /* ASI_BLK_* block transfer */
    GET_ASI_SHORT,   /* ASI_FL8/FL16 8- or 16-bit fp access */
    GET_ASI_BCOPY,   /* v8 ASI_M_BCOPY block copy */
    GET_ASI_BFILL,   /* v8 ASI_M_BFILL block fill */
} ASIType;

typedef struct {
    ASIType type;
    int asi;        /* resolved ASI number */
    int mem_idx;    /* MMU index to use for the access */
    MemOp memop;    /* size/sign/endianness of the access */
} DisasASI;
1884 
1885 /*
1886  * Build DisasASI.
1887  * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1889  */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:   /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* Non-immediate form: use the current %asi value.  */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: choose the MMU index from the address space.  */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second switch: choose the implementation strategy.  */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
2108 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * For 32-bit user-only builds there is no out-of-line ASI helper; every
 * ASI access is handled inline above.  Provide unreachable stubs so the
 * shared code paths below still compile.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
2122 
/*
 * Emit code for an integer load-alternate (LDA-class) instruction.
 * DA describes the decoded ASI; the value at ADDR is loaded into DST.
 */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Nothing to emit; an exception was raised during ASI decode.  */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        /* The ASI maps to an ordinary aligned memory access.  */
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* Uncommon ASIs go through the out-of-line helper.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; truncate for 32-bit targets.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
2153 
/*
 * Emit code for an integer store-alternate (STA-class) instruction.
 * DA describes the decoded ASI; SRC is stored to ADDR.
 */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Nothing to emit; an exception was raised during ASI decode.  */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        /* Uncommon ASIs go through the out-of-line helper.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes 64 bits; widen for 32-bit targets.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2225 
2226 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
2227                          TCGv dst, TCGv src, TCGv addr)
2228 {
2229     switch (da->type) {
2230     case GET_ASI_EXCP:
2231         break;
2232     case GET_ASI_DIRECT:
2233         tcg_gen_atomic_xchg_tl(dst, addr, src,
2234                                da->mem_idx, da->memop | MO_ALIGN);
2235         break;
2236     default:
2237         /* ??? Should be DAE_invalid_asi.  */
2238         gen_exception(dc, TT_DATA_ACCESS);
2239         break;
2240     }
2241 }
2242 
2243 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
2244                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
2245 {
2246     switch (da->type) {
2247     case GET_ASI_EXCP:
2248         return;
2249     case GET_ASI_DIRECT:
2250         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
2251                                   da->mem_idx, da->memop | MO_ALIGN);
2252         break;
2253     default:
2254         /* ??? Should be DAE_invalid_asi.  */
2255         gen_exception(dc, TT_DATA_ACCESS);
2256         break;
2257     }
2258 }
2259 
/*
 * Emit code for LDSTUBA: atomically load the byte at ADDR into DST and
 * store 0xff back to the same location.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Nothing to emit; an exception was raised during ASI decode.  */
        break;
    case GET_ASI_DIRECT:
        /* An xchg with constant 0xff implements load-store-unsigned-byte.  */
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* The separate ld+st below is not atomic; bail out and retry
               the instruction with all other cpus stopped.  */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2294 
/*
 * Emit code for a floating-point load-alternate (LDFA/LDDFA/LDQFA).
 * ORIG_SIZE is the architectural access size before any internal
 * demotion; RD is the fp register number as encoded in the insn.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Nothing to emit; an exception was raised during ASI decode.  */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load into a temp first so a fault on the second half does
               not leave the first half of the register pair modified.  */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* 64-byte block load: eight consecutive 8-byte loads.  */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                /* As above: buffer the first half against a fault in
                   the second helper call.  */
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2406 
/*
 * Emit code for a floating-point store-alternate (STFA/STDFA/STQFA).
 * ORIG_SIZE is the architectural access size before any internal
 * demotion; RD is the fp register number as encoded in the insn.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Nothing to emit; an exception was raised during ASI decode.  */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        /*
         * NOTE(review): memop now carries MO_ALIGN_4 in its MO_AMASK
         * field; the additional "| MO_ALIGN" and "| MO_ALIGN_16" below
         * OR into that same multi-bit field rather than replacing it —
         * confirm the combined alignment values are the intended ones.
         */
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* 64-byte block store: eight consecutive 8-byte stores.  */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2489 
/*
 * Emit code for LDDA: load a doubleword into the even/odd register pair
 * rd/rd+1 (or, for TWINX ASIs on sparc64, two 64-bit registers).
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised during ASI decode; skip writeback.  */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    /* Write both halves back to the register pair.  */
    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2565 
/*
 * Emit code for STDA: store the even/odd register pair rd/rd+1 as a
 * doubleword (or, for TWINX ASIs on sparc64, as a 128-bit access).
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Nothing to emit; an exception was raised during ASI decode.  */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            /* Replicate the register pair across four doublewords.  */
            tcg_gen_concat_tl_i64(t64, lo, hi);
            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2657 
2658 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2659 {
2660     unsigned int rs1 = GET_FIELD(insn, 13, 17);
2661     return gen_load_gpr(dc, rs1);
2662 }
2663 
2664 #ifdef TARGET_SPARC64
2665 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2666 {
2667     TCGv_i32 c32, zero, dst, s1, s2;
2668 
2669     /* We have two choices here: extend the 32 bit data and use movcond_i64,
2670        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2671        the later.  */
2672     c32 = tcg_temp_new_i32();
2673     if (cmp->is_bool) {
2674         tcg_gen_extrl_i64_i32(c32, cmp->c1);
2675     } else {
2676         TCGv_i64 c64 = tcg_temp_new_i64();
2677         tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2678         tcg_gen_extrl_i64_i32(c32, c64);
2679     }
2680 
2681     s1 = gen_load_fpr_F(dc, rs);
2682     s2 = gen_load_fpr_F(dc, rd);
2683     dst = gen_dest_fpr_F(dc);
2684     zero = tcg_constant_i32(0);
2685 
2686     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2687 
2688     gen_store_fpr_F(dc, rd, dst);
2689 }
2690 
2691 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2692 {
2693     TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2694     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2695                         gen_load_fpr_D(dc, rs),
2696                         gen_load_fpr_D(dc, rd));
2697     gen_store_fpr_D(dc, rd, dst);
2698 }
2699 
2700 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2701 {
2702     int qd = QFPREG(rd);
2703     int qs = QFPREG(rs);
2704 
2705     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2706                         cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2707     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2708                         cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2709 
2710     gen_update_fprs_dirty(dc, qd);
2711 }
2712 
2713 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2714 {
2715     TCGv_i32 r_tl = tcg_temp_new_i32();
2716 
2717     /* load env->tl into r_tl */
2718     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2719 
2720     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2721     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2722 
2723     /* calculate offset to current trap state from env->ts, reuse r_tl */
2724     tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2725     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2726 
2727     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2728     {
2729         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2730         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2731         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2732     }
2733 }
2734 
/*
 * Emit code for the VIS EDGE8/16/32{L}{cc} instructions: DST receives
 * the edge mask computed from addresses S1 and S2 for the given element
 * WIDTH.  When CC is set, the condition codes are also updated as for
 * "subcc s1, s2".  LEFT selects the left-edge variant.
 */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* Update the flags exactly as a subcc of the two addresses.  */
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    /* Look up the per-operand masks in the tables described above.  */
    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
    tcg_gen_andi_tl(lo1, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /* Compare the doubleword-aligned addresses, honoring the 32-bit
       address mask when it is in effect.  */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
    tcg_gen_and_tl(lo2, lo2, lo1);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
}
2821 
2822 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2823 {
2824     TCGv tmp = tcg_temp_new();
2825 
2826     tcg_gen_add_tl(tmp, s1, s2);
2827     tcg_gen_andi_tl(dst, tmp, -8);
2828     if (left) {
2829         tcg_gen_neg_tl(tmp, tmp);
2830     }
2831     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2832 }
2833 
2834 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2835 {
2836     TCGv t1, t2, shift;
2837 
2838     t1 = tcg_temp_new();
2839     t2 = tcg_temp_new();
2840     shift = tcg_temp_new();
2841 
2842     tcg_gen_andi_tl(shift, gsr, 7);
2843     tcg_gen_shli_tl(shift, shift, 3);
2844     tcg_gen_shl_tl(t1, s1, shift);
2845 
2846     /* A shift of 64 does not produce 0 in TCG.  Divide this into a
2847        shift of (up to 63) followed by a constant shift of 1.  */
2848     tcg_gen_xori_tl(shift, shift, 63);
2849     tcg_gen_shr_tl(t2, s2, shift);
2850     tcg_gen_shri_tl(t2, t2, 1);
2851 
2852     tcg_gen_or_tl(dst, t1, t2);
2853 }
2854 #endif
2855 
/* Decoder operand hook: map a raw register field to a double-fp regno. */
static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}
2860 
/* Decoder operand hook: map a raw register field to a quad-fp regno.  */
static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}
2865 
2866 /* Include the auto-generated decoder.  */
2867 #include "decode-insns.c.inc"
2868 
/*
 * Expand a decodetree trans_* entry point: test the availability
 * predicate for the insn, then forward to the shared translator FUNC.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates, resolved per build (sparc64 vs sparc32).
 * Features that are unconditional on one target become compile-time
 * constants so the unused code paths fold away.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
#endif
2895 
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    /*
     * The sentinel values (DYNAMIC_PC, DYNAMIC_PC_LOOKUP, JUMP_PC) are
     * distinguished from real addresses by their low two bits, hence
     * the "npc & 3" test.
     */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at run time; emit code to advance it.  */
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are static: pc <- npc, npc <- npc + 4.  */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2920 
2921 /*
2922  * Major opcodes 00 and 01 -- branches, call, and sethi
2923  */
2924 
2925 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2926 {
2927     if (annul) {
2928         dc->pc = dc->npc + 4;
2929         dc->npc = dc->pc + 4;
2930     } else {
2931         dc->pc = dc->npc;
2932         dc->npc = dc->pc + 4;
2933     }
2934     return true;
2935 }
2936 
2937 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2938                                        target_ulong dest)
2939 {
2940     if (annul) {
2941         dc->pc = dest;
2942         dc->npc = dest + 4;
2943     } else {
2944         dc->pc = dc->npc;
2945         dc->npc = dest;
2946         tcg_gen_mov_tl(cpu_pc, cpu_npc);
2947     }
2948     return true;
2949 }
2950 
/*
 * Conditional branch to DEST based on CMP.  With the annul bit set the
 * delay slot only executes when the branch is taken, so the TB must end
 * here with a two-way goto_tb.  Otherwise the decision is deferred via
 * the JUMP_PC sentinel (or a movcond when npc is already dynamic).
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        /* Taken: delay slot at npc then dest; not taken: skip the slot. */
        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            /* npc holds a sentinel; the real value lives in cpu_npc.  */
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, cmp->c2,
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Record both successors and evaluate the condition into
               cpu_cond; advance_pc resolves JUMP_PC after the slot.  */
            dc->pc = npc;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;
            dc->npc = JUMP_PC;
            if (cmp->is_bool) {
                tcg_gen_mov_tl(cpu_cond, cmp->c1);
            } else {
                tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
        }
    }
    return true;
}
2994 
/* Raise a privileged-instruction trap; the insn counts as handled.  */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
3000 
3001 static bool raise_unimpfpop(DisasContext *dc)
3002 {
3003     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
3004     return true;
3005 }
3006 
3007 static bool gen_trap_float128(DisasContext *dc)
3008 {
3009     if (dc->def->features & CPU_FEATURE_FLOAT128) {
3010         return false;
3011     }
3012     return raise_unimpfpop(dc);
3013 }
3014 
/* Integer condition-code branches: Bicc (v7/v8) and BPcc (v9). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    /* a->i is the word displacement, relative to this insn. */
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    switch (a->cond) {
    case 0x0:   /* BN: branch never */
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:   /* BA: branch always */
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_compare(&cmp, a->cc, a->cond, dc);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
3035 
/* Floating-point condition-code branches: FBfcc and FBPfcc. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    /* FP branches trap if the FPU is disabled. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    switch (a->cond) {
    case 0x0:   /* FBN: branch never */
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:   /* FBA: branch always */
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_fcompare(&cmp, a->cc, a->cond);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
3059 
/* BPr: v9 branch on the contents of an integer register. */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* Reserved condition encodings map to TCG_COND_NEVER: illegal insn. */
    if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
        return false;
    }

    flush_cond(dc);
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return advance_jump_cond(dc, &cmp, a->a, target);
}
3076 
/* CALL: save the return address in %o7 (r15) and jump pc-relative. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    /* %o7 receives the address of the CALL itself. */
    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
3086 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

/* SETHI: load a 22-bit immediate into bits [31:10] of rd. */
static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop.  */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
3109 
3110 /*
3111  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
3112  */
3113 
/*
 * Tcc: trap on condition.  The trap number is (rs1 + rs2/imm), masked,
 * plus the TT_TRAP base.  Hypervisor-capable CPUs in supervisor mode use
 * the wider UA2005 mask; otherwise the V8 mask applies.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* Compute rs1 + rs2/imm in 32 bits, then mask and bias. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Branch to a deferred exception block when the condition holds. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
3164 
/* Tcc with a register operand; %icc only on 32-bit CPUs. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Tcc with immediate, pre-v9 encoding (no cc field). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc with immediate, v9 encoding (selects %icc/%xcc). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}

/* STBAR: store-store barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
3194 
/* MEMBAR (v9 only): emit a TCG barrier matching the mmask bits. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
3210 
/*
 * Common helper for reading special registers (ASR/PR/HPR) into rd.
 * FUNC receives a scratch destination and returns the TCGv actually
 * holding the value (which may be a shared global instead).  PRIV false
 * raises a privileged-instruction trap.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
3220 
/* RDY: the Y register lives in a TCG global; return it directly. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

/* Leon3 %asr17 processor configuration register (read-only subset). */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    uint32_t val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = dc->def->nwindows - 1;   /* [4:0] NWIN */
    val |= 1 << 8;                 /* [8]   V8   */

    return tcg_constant_tl(val);
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
3254 
/* RDCCR: fold lazy condition codes into PSR form first. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current ASI is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

/* RDTICK: read the tick counter; may touch I/O state. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the PC of this insn is a translation-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3297 
/* Readers for the remaining v9 ASRs; each returns the TCGv holding the value. */

static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR traps if the FPU is disabled; the value lives in a TCG global. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* RDSTICK: like RDTICK but for the system tick counter. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3367 
/* RDPSR (sparc32): fold lazy condition codes, then read the PSR. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

/* Hyperprivileged register readers (RDHPR); all require hypervisor mode. */

static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* htstate is an array indexed by the current trap level (tl). */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    /* Scale tl by 8 to form the byte offset into htstate[]. */
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

/* RDWIM (sparc32): window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3442 
/* RDPR readers for the per-trap-level trap state (tpc/tnpc/tstate/tt). */

static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

/* TBR/TBA live in a TCG global; return it directly. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3511 
/* RDPR readers for the remaining v9 privileged registers. */

static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* CWP needs a helper, since windows may need normalization. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3609 
3610 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3611 {
3612     if (avail_64(dc)) {
3613         gen_helper_flushw(tcg_env);
3614         return advance_pc(dc);
3615     }
3616     return false;
3617 }
3618 
/*
 * Common helper for writing special registers.  Per the SPARC WR*
 * definition, the value written is rs1 XOR (rs2 or simm13); FUNC then
 * commits that value.  PRIV false raises a privileged-instruction trap.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* rs1 is %g0: the xor reduces to the constant itself. */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: pass rs1 straight through. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3650 
/* Writers for ASRs; each commits SRC (already rs1 ^ rs2/imm) to the register. */

/* WRY: only the low 32 bits of Y are architected. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    /* Only the low 8 bits of the ASI are kept. */
    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    /* FPRS gates FPU access; retranslate with the new setting. */
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3732 
3733 static void do_wrstick(DisasContext *dc, TCGv src)
3734 {
3735 #ifdef TARGET_SPARC64
3736     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3737 
3738     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3739     translator_io_start(&dc->base);
3740     gen_helper_tick_set_count(r_tickptr, src);
3741     /* End TB to handle timer interrupt */
3742     dc->base.is_jmp = DISAS_EXIT;
3743 #else
3744     qemu_build_not_reached();
3745 #endif
3746 }
3747 
3748 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3749 
/* WRSTICK_CMPR: store the compare value and reprogram the stick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3763 
3764 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3765 {
3766     save_state(dc);
3767     gen_helper_power_down(tcg_env);
3768 }
3769 
3770 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3771 
/* WRPSR (sparc32): helper validates CWP etc; condition codes become valid. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
    dc->cc_op = CC_OP_FLAGS;
    /* PSR affects execution mode; retranslate. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* WRWIM (sparc32): only bits for implemented windows are writable. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)

/* WRPR writers for the per-trap-level trap state (tpc/tnpc/tstate/tt). */

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

/* WRPR %tick: set the tick counter; ends the TB for timer effects. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3861 
/* WRPR writers for the remaining v9 privileged registers. */

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

/* PSTATE changes privilege/endian state: sync, write, force dynamic npc. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* Changing TL switches the active trap-state set: force dynamic npc. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3948 
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 WRTBR shares the writer with v9 WRPR %tba. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

/* Hyperprivileged register writers (WRHPR); all require hypervisor mode. */

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate))
;
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/* htstate is an array indexed by the current trap level (tl). */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    /* Scale tl by 8 to form the byte offset into htstate[]. */
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
4011 
/* SAVED/RESTORED (v9): adjust window-management state; supervisor only. */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
4027 
/* NOP: nothing to emit; just step the PC. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
4039 
/*
 * Common worker for 3-operand arithmetic: rd = rs1 OP (rs2 or simm13).
 * FUNCI, when non-NULL, is the immediate form of FUNC.  When a->cc is
 * set, the result goes through cpu_cc_dst and CC_OP is set for lazy
 * condition-code evaluation.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long))
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (a->cc) {
        dst = cpu_cc_dst;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }
    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
        dc->cc_op = cc_op;
    }
    return advance_pc(dc);
}
4075 
4076 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
4077                      void (*func)(TCGv, TCGv, TCGv),
4078                      void (*funci)(TCGv, TCGv, target_long),
4079                      void (*func_cc)(TCGv, TCGv, TCGv))
4080 {
4081     if (a->cc) {
4082         assert(cc_op >= 0);
4083         return do_arith_int(dc, a, cc_op, func_cc, NULL);
4084     }
4085     return do_arith_int(dc, a, cc_op, func, funci);
4086 }
4087 
/*
 * Logical ops: when a->cc is requested, the flags are always computed
 * from the result via CC_OP_LOGIC, so no separate cc generator is needed.
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
}

TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)

TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
4122 
4123 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
4124 {
4125     /* OR with %g0 is the canonical alias for MOV. */
4126     if (!a->cc && a->rs1 == 0) {
4127         if (a->imm || a->rs2_or_imm == 0) {
4128             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
4129         } else if (a->rs2_or_imm & ~0x1f) {
4130             /* For simplicity, we under-decoded the rs2 form. */
4131             return false;
4132         } else {
4133             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
4134         }
4135         return advance_pc(dc);
4136     }
4137     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
4138 }
4139 
/*
 * ADDC/ADDCcc: add with carry.  Pick the generator based on how the
 * condition codes were last computed, so the carry can be extracted
 * cheaply in the common cases.
 */
static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Carry comes from a preceding addition. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Carry comes from a preceding subtraction. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        /* Carry state unknown at translation time; compute it generically. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}
4163 
/*
 * SUBC/SUBCcc: subtract with carry (borrow).  Mirror of trans_ADDC:
 * the generator choice depends on the current lazy cc state.
 */
static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        return do_arith(dc, a, CC_OP_SUB,
                        tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Carry comes from a preceding addition. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_add, NULL, gen_op_subccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Carry comes from a preceding subtraction. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_sub, NULL, gen_op_subccc_sub);
    default:
        /* Carry state unknown at translation time; compute it generically. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_generic, NULL, gen_op_subccc_generic);
    }
}
4187 
/*
 * MULScc: multiply step.  Flush the lazy condition codes first,
 * since the step generator reads the current PSR state.
 */
static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
{
    update_psr(dc);
    return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
}
4193 
4194 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4195 {
4196     TCGv dst, src1, src2;
4197 
4198     /* Reject 64-bit shifts for sparc32. */
4199     if (avail_32(dc) && a->x) {
4200         return false;
4201     }
4202 
4203     src2 = tcg_temp_new();
4204     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4205     src1 = gen_load_gpr(dc, a->rs1);
4206     dst = gen_dest_gpr(dc, a->rd);
4207 
4208     if (l) {
4209         tcg_gen_shl_tl(dst, src1, src2);
4210         if (!a->x) {
4211             tcg_gen_ext32u_tl(dst, dst);
4212         }
4213     } else if (u) {
4214         if (!a->x) {
4215             tcg_gen_ext32u_tl(dst, src1);
4216             src1 = dst;
4217         }
4218         tcg_gen_shr_tl(dst, src1, src2);
4219     } else {
4220         if (!a->x) {
4221             tcg_gen_ext32s_tl(dst, src1);
4222             src1 = dst;
4223         }
4224         tcg_gen_sar_tl(dst, src1, src2);
4225     }
4226     gen_store_gpr(dc, a->rd, dst);
4227     return advance_pc(dc);
4228 }
4229 
4230 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4231 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4232 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4233 
4234 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4235 {
4236     TCGv dst, src1;
4237 
4238     /* Reject 64-bit shifts for sparc32. */
4239     if (avail_32(dc) && (a->x || a->i >= 32)) {
4240         return false;
4241     }
4242 
4243     src1 = gen_load_gpr(dc, a->rs1);
4244     dst = gen_dest_gpr(dc, a->rd);
4245 
4246     if (avail_32(dc) || a->x) {
4247         if (l) {
4248             tcg_gen_shli_tl(dst, src1, a->i);
4249         } else if (u) {
4250             tcg_gen_shri_tl(dst, src1, a->i);
4251         } else {
4252             tcg_gen_sari_tl(dst, src1, a->i);
4253         }
4254     } else {
4255         if (l) {
4256             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4257         } else if (u) {
4258             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4259         } else {
4260             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4261         }
4262     }
4263     gen_store_gpr(dc, a->rd, dst);
4264     return advance_pc(dc);
4265 }
4266 
4267 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4268 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4269 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4270 
4271 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4272 {
4273     /* For simplicity, we under-decoded the rs2 form. */
4274     if (!imm && rs2_or_imm & ~0x1f) {
4275         return NULL;
4276     }
4277     if (imm || rs2_or_imm == 0) {
4278         return tcg_constant_tl(rs2_or_imm);
4279     } else {
4280         return cpu_regs[rs2_or_imm];
4281     }
4282 }
4283 
/*
 * Conditional move: rd = cmp ? src2 : rd.  The old value of rd is
 * loaded first so that movcond can preserve it when cmp is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4292 
4293 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4294 {
4295     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4296     DisasCompare cmp;
4297 
4298     if (src2 == NULL) {
4299         return false;
4300     }
4301     gen_compare(&cmp, a->cc, a->cond, dc);
4302     return do_mov_cond(dc, &cmp, a->rd, src2);
4303 }
4304 
4305 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4306 {
4307     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4308     DisasCompare cmp;
4309 
4310     if (src2 == NULL) {
4311         return false;
4312     }
4313     gen_fcompare(&cmp, a->cc, a->cond);
4314     return do_mov_cond(dc, &cmp, a->rd, src2);
4315 }
4316 
4317 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4318 {
4319     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4320     DisasCompare cmp;
4321 
4322     if (src2 == NULL) {
4323         return false;
4324     }
4325     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4326     return do_mov_cond(dc, &cmp, a->rd, src2);
4327 }
4328 
4329 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4330                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4331 {
4332     TCGv src1, sum;
4333 
4334     /* For simplicity, we under-decoded the rs2 form. */
4335     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4336         return false;
4337     }
4338 
4339     /*
4340      * Always load the sum into a new temporary.
4341      * This is required to capture the value across a window change,
4342      * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4343      */
4344     sum = tcg_temp_new();
4345     src1 = gen_load_gpr(dc, a->rs1);
4346     if (a->imm || a->rs2_or_imm == 0) {
4347         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4348     } else {
4349         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4350     }
4351     return func(dc, a->rd, sum);
4352 }
4353 
/* JMPL: jump to src, storing the address of the JMPL itself into rd. */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    /* The target is only known at runtime. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4374 
/* RETT (sparc32): privileged return from trap; rd is unused. */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    /* The helper updates the privileged state (window, PSR). */
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4392 
/* RETURN (sparcv9): jump to src and restore the caller's register window. */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4407 
/*
 * SAVE: rotate to the new register window first, then write the
 * pre-computed sum (captured in a temp by do_add_special) into the
 * rd of the new window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4416 
/*
 * RESTORE: rotate back to the previous register window first, then
 * write the pre-computed sum into the rd of that window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4425 
/* DONE/RETRY (sparcv9): privileged return from a trap handler. */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    /* pc and npc are no longer static once the helper has run. */
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4444 
4445 /*
4446  * Major opcode 11 -- load and store instructions
4447  */
4448 
4449 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4450 {
4451     TCGv addr, tmp = NULL;
4452 
4453     /* For simplicity, we under-decoded the rs2 form. */
4454     if (!imm && rs2_or_imm & ~0x1f) {
4455         return NULL;
4456     }
4457 
4458     addr = gen_load_gpr(dc, rs1);
4459     if (rs2_or_imm) {
4460         tmp = tcg_temp_new();
4461         if (imm) {
4462             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4463         } else {
4464             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4465         }
4466         addr = tmp;
4467     }
4468     if (AM_CHECK(dc)) {
4469         if (!tmp) {
4470             tmp = tcg_temp_new();
4471         }
4472         tcg_gen_ext32u_tl(tmp, addr);
4473         addr = tmp;
4474     }
4475     return addr;
4476 }
4477 
4478 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4479 {
4480     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4481     DisasASI da;
4482 
4483     if (addr == NULL) {
4484         return false;
4485     }
4486     da = resolve_asi(dc, a->asi, mop);
4487 
4488     reg = gen_dest_gpr(dc, a->rd);
4489     gen_ld_asi(dc, &da, reg, addr);
4490     gen_store_gpr(dc, a->rd, reg);
4491     return advance_pc(dc);
4492 }
4493 
4494 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4495 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4496 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4497 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4498 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4499 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4500 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4501 
4502 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4503 {
4504     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4505     DisasASI da;
4506 
4507     if (addr == NULL) {
4508         return false;
4509     }
4510     da = resolve_asi(dc, a->asi, mop);
4511 
4512     reg = gen_load_gpr(dc, a->rd);
4513     gen_st_asi(dc, &da, reg, addr);
4514     return advance_pc(dc);
4515 }
4516 
4517 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4518 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4519 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4520 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4521 
/* LDD: load a doubleword into an even/odd register pair; rd must be even. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4538 
/* STD: store a doubleword from an even/odd register pair; rd must be even. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4555 
4556 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4557 {
4558     TCGv addr, reg;
4559     DisasASI da;
4560 
4561     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4562     if (addr == NULL) {
4563         return false;
4564     }
4565     da = resolve_asi(dc, a->asi, MO_UB);
4566 
4567     reg = gen_dest_gpr(dc, a->rd);
4568     gen_ldstub_asi(dc, &da, reg, addr);
4569     gen_store_gpr(dc, a->rd, reg);
4570     return advance_pc(dc);
4571 }
4572 
/* SWAP: atomically exchange rd with a 32-bit word in memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    /* rd is both the value stored and the destination of the old value. */
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4590 
/*
 * CASA/CASXA: compare-and-swap.  The effective address is rs1 alone
 * (gen_ldst_addr is called with a zero immediate offset).  rs2 holds
 * the comparison value; rd supplies the new value and receives the
 * old memory contents.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4612 
/* Floating-point load of sz (32/64/128 bits) via the insn's ASI. */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* An FPU-disabled trap is checked before the 128-bit feature trap. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    /* The destination fp registers are now live. */
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4640 
/* Floating-point store of sz (32/64/128 bits) via the insn's ASI. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* An FPU-disabled trap is checked before the 128-bit feature trap. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4667 
/* STDFQ (sparc32 only): store double from the floating-point queue. */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Always raises an fp exception with FTT = sequence error. */
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4682 
/*
 * Feature gates for the legacy decoder below; both `goto` labels
 * defined in the function that uses them.
 */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
4689 
4690 /* before an instruction, dc->pc must be static */
4691 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
4692 {
4693     unsigned int opc, rs1, rs2, rd;
4694     TCGv cpu_src1 __attribute__((unused));
4695     TCGv cpu_src2 __attribute__((unused));
4696     TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
4697     TCGv_i64 cpu_src1_64, cpu_src2_64;
4698     TCGv_i64 cpu_dst_64 __attribute__((unused));
4699     target_long simm;
4700 
4701     opc = GET_FIELD(insn, 0, 1);
4702     rd = GET_FIELD(insn, 2, 6);
4703 
4704     switch (opc) {
4705     case 0:
4706         goto illegal_insn; /* in decodetree */
4707     case 1:
4708         g_assert_not_reached(); /* in decodetree */
4709     case 2:                     /* FPU & Logical Operations */
4710         {
4711             unsigned int xop = GET_FIELD(insn, 7, 12);
4712             TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
4713 
4714             if (xop == 0x34) {   /* FPU Operations */
4715                 if (gen_trap_ifnofpu(dc)) {
4716                     goto jmp_insn;
4717                 }
4718                 gen_op_clear_ieee_excp_and_FTT();
4719                 rs1 = GET_FIELD(insn, 13, 17);
4720                 rs2 = GET_FIELD(insn, 27, 31);
4721                 xop = GET_FIELD(insn, 18, 26);
4722 
4723                 switch (xop) {
4724                 case 0x1: /* fmovs */
4725                     cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4726                     gen_store_fpr_F(dc, rd, cpu_src1_32);
4727                     break;
4728                 case 0x5: /* fnegs */
4729                     gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
4730                     break;
4731                 case 0x9: /* fabss */
4732                     gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
4733                     break;
4734                 case 0x29: /* fsqrts */
4735                     gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
4736                     break;
4737                 case 0x2a: /* fsqrtd */
4738                     gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
4739                     break;
4740                 case 0x2b: /* fsqrtq */
4741                     CHECK_FPU_FEATURE(dc, FLOAT128);
4742                     gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
4743                     break;
4744                 case 0x41: /* fadds */
4745                     gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
4746                     break;
4747                 case 0x42: /* faddd */
4748                     gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
4749                     break;
4750                 case 0x43: /* faddq */
4751                     CHECK_FPU_FEATURE(dc, FLOAT128);
4752                     gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
4753                     break;
4754                 case 0x45: /* fsubs */
4755                     gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
4756                     break;
4757                 case 0x46: /* fsubd */
4758                     gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
4759                     break;
4760                 case 0x47: /* fsubq */
4761                     CHECK_FPU_FEATURE(dc, FLOAT128);
4762                     gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
4763                     break;
4764                 case 0x49: /* fmuls */
4765                     gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
4766                     break;
4767                 case 0x4a: /* fmuld */
4768                     gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
4769                     break;
4770                 case 0x4b: /* fmulq */
4771                     CHECK_FPU_FEATURE(dc, FLOAT128);
4772                     gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
4773                     break;
4774                 case 0x4d: /* fdivs */
4775                     gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
4776                     break;
4777                 case 0x4e: /* fdivd */
4778                     gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
4779                     break;
4780                 case 0x4f: /* fdivq */
4781                     CHECK_FPU_FEATURE(dc, FLOAT128);
4782                     gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
4783                     break;
4784                 case 0x69: /* fsmuld */
4785                     CHECK_FPU_FEATURE(dc, FSMULD);
4786                     gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
4787                     break;
4788                 case 0x6e: /* fdmulq */
4789                     CHECK_FPU_FEATURE(dc, FLOAT128);
4790                     gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
4791                     break;
4792                 case 0xc4: /* fitos */
4793                     gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
4794                     break;
4795                 case 0xc6: /* fdtos */
4796                     gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
4797                     break;
4798                 case 0xc7: /* fqtos */
4799                     CHECK_FPU_FEATURE(dc, FLOAT128);
4800                     gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
4801                     break;
4802                 case 0xc8: /* fitod */
4803                     gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
4804                     break;
4805                 case 0xc9: /* fstod */
4806                     gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
4807                     break;
4808                 case 0xcb: /* fqtod */
4809                     CHECK_FPU_FEATURE(dc, FLOAT128);
4810                     gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
4811                     break;
4812                 case 0xcc: /* fitoq */
4813                     CHECK_FPU_FEATURE(dc, FLOAT128);
4814                     gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
4815                     break;
4816                 case 0xcd: /* fstoq */
4817                     CHECK_FPU_FEATURE(dc, FLOAT128);
4818                     gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
4819                     break;
4820                 case 0xce: /* fdtoq */
4821                     CHECK_FPU_FEATURE(dc, FLOAT128);
4822                     gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
4823                     break;
4824                 case 0xd1: /* fstoi */
4825                     gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
4826                     break;
4827                 case 0xd2: /* fdtoi */
4828                     gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
4829                     break;
4830                 case 0xd3: /* fqtoi */
4831                     CHECK_FPU_FEATURE(dc, FLOAT128);
4832                     gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
4833                     break;
4834 #ifdef TARGET_SPARC64
4835                 case 0x2: /* V9 fmovd */
4836                     cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4837                     gen_store_fpr_D(dc, rd, cpu_src1_64);
4838                     break;
4839                 case 0x3: /* V9 fmovq */
4840                     CHECK_FPU_FEATURE(dc, FLOAT128);
4841                     gen_move_Q(dc, rd, rs2);
4842                     break;
4843                 case 0x6: /* V9 fnegd */
4844                     gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
4845                     break;
4846                 case 0x7: /* V9 fnegq */
4847                     CHECK_FPU_FEATURE(dc, FLOAT128);
4848                     gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
4849                     break;
4850                 case 0xa: /* V9 fabsd */
4851                     gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
4852                     break;
4853                 case 0xb: /* V9 fabsq */
4854                     CHECK_FPU_FEATURE(dc, FLOAT128);
4855                     gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
4856                     break;
4857                 case 0x81: /* V9 fstox */
4858                     gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
4859                     break;
4860                 case 0x82: /* V9 fdtox */
4861                     gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
4862                     break;
4863                 case 0x83: /* V9 fqtox */
4864                     CHECK_FPU_FEATURE(dc, FLOAT128);
4865                     gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
4866                     break;
4867                 case 0x84: /* V9 fxtos */
4868                     gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
4869                     break;
4870                 case 0x88: /* V9 fxtod */
4871                     gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
4872                     break;
4873                 case 0x8c: /* V9 fxtoq */
4874                     CHECK_FPU_FEATURE(dc, FLOAT128);
4875                     gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
4876                     break;
4877 #endif
4878                 default:
4879                     goto illegal_insn;
4880                 }
4881             } else if (xop == 0x35) {   /* FPU Operations */
4882 #ifdef TARGET_SPARC64
4883                 int cond;
4884 #endif
4885                 if (gen_trap_ifnofpu(dc)) {
4886                     goto jmp_insn;
4887                 }
4888                 gen_op_clear_ieee_excp_and_FTT();
4889                 rs1 = GET_FIELD(insn, 13, 17);
4890                 rs2 = GET_FIELD(insn, 27, 31);
4891                 xop = GET_FIELD(insn, 18, 26);
4892 
4893 #ifdef TARGET_SPARC64
4894 #define FMOVR(sz)                                                  \
4895                 do {                                               \
4896                     DisasCompare cmp;                              \
4897                     cond = GET_FIELD_SP(insn, 10, 12);             \
4898                     cpu_src1 = get_src1(dc, insn);                 \
4899                     gen_compare_reg(&cmp, cond, cpu_src1);         \
4900                     gen_fmov##sz(dc, &cmp, rd, rs2);               \
4901                 } while (0)
4902 
4903                 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
4904                     FMOVR(s);
4905                     break;
4906                 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
4907                     FMOVR(d);
4908                     break;
4909                 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
4910                     CHECK_FPU_FEATURE(dc, FLOAT128);
4911                     FMOVR(q);
4912                     break;
4913                 }
4914 #undef FMOVR
4915 #endif
                switch (xop) {
#ifdef TARGET_SPARC64
                /*
                 * FMOVcc on a floating-point condition code: evaluate the
                 * condition (insn bits 14..17) against %fccN and
                 * conditionally move an FP register of size sz.
                 */
#define FMOVCC(fcc, sz)                                                 \
                    do {                                                \
                        DisasCompare cmp;                               \
                        cond = GET_FIELD_SP(insn, 14, 17);              \
                        gen_fcompare(&cmp, fcc, cond);                  \
                        gen_fmov##sz(dc, &cmp, rd, rs2);                \
                    } while (0)

                    case 0x001: /* V9 fmovscc %fcc0 */
                        FMOVCC(0, s);
                        break;
                    case 0x002: /* V9 fmovdcc %fcc0 */
                        FMOVCC(0, d);
                        break;
                    case 0x003: /* V9 fmovqcc %fcc0 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(0, q);
                        break;
                    case 0x041: /* V9 fmovscc %fcc1 */
                        FMOVCC(1, s);
                        break;
                    case 0x042: /* V9 fmovdcc %fcc1 */
                        FMOVCC(1, d);
                        break;
                    case 0x043: /* V9 fmovqcc %fcc1 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(1, q);
                        break;
                    case 0x081: /* V9 fmovscc %fcc2 */
                        FMOVCC(2, s);
                        break;
                    case 0x082: /* V9 fmovdcc %fcc2 */
                        FMOVCC(2, d);
                        break;
                    case 0x083: /* V9 fmovqcc %fcc2 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(2, q);
                        break;
                    case 0x0c1: /* V9 fmovscc %fcc3 */
                        FMOVCC(3, s);
                        break;
                    case 0x0c2: /* V9 fmovdcc %fcc3 */
                        FMOVCC(3, d);
                        break;
                    case 0x0c3: /* V9 fmovqcc %fcc3 */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(3, q);
                        break;
#undef FMOVCC
                /*
                 * Redefined: same shape, but on an integer condition code
                 * (gen_compare); xcc == 0 selects %icc, xcc == 1 selects
                 * %xcc, per the cases below.
                 */
#define FMOVCC(xcc, sz)                                                 \
                    do {                                                \
                        DisasCompare cmp;                               \
                        cond = GET_FIELD_SP(insn, 14, 17);              \
                        gen_compare(&cmp, xcc, cond, dc);               \
                        gen_fmov##sz(dc, &cmp, rd, rs2);                \
                    } while (0)

                    case 0x101: /* V9 fmovscc %icc */
                        FMOVCC(0, s);
                        break;
                    case 0x102: /* V9 fmovdcc %icc */
                        FMOVCC(0, d);
                        break;
                    case 0x103: /* V9 fmovqcc %icc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(0, q);
                        break;
                    case 0x181: /* V9 fmovscc %xcc */
                        FMOVCC(1, s);
                        break;
                    case 0x182: /* V9 fmovdcc %xcc */
                        FMOVCC(1, d);
                        break;
                    case 0x183: /* V9 fmovqcc %xcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        FMOVCC(1, q);
                        break;
#undef FMOVCC
#endif
                    /*
                     * FP compares: rd & 3 selects the destination %fcc
                     * field (on V9; pre-V9 only %fcc0 exists).  The 0x5x
                     * forms are ordinary compares, the 0x5[5-7] 'e' forms
                     * are compare-with-exception variants.
                     */
                    case 0x51: /* fcmps, V9 %fcc */
                        cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                        cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                        gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                        break;
                    case 0x52: /* fcmpd, V9 %fcc */
                        cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                        cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                        gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                        break;
                    case 0x53: /* fcmpq, V9 %fcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        /* quad operands travel via the QT0/QT1 staging regs */
                        gen_op_load_fpr_QT0(QFPREG(rs1));
                        gen_op_load_fpr_QT1(QFPREG(rs2));
                        gen_op_fcmpq(rd & 3);
                        break;
                    case 0x55: /* fcmpes, V9 %fcc */
                        cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                        cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                        gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                        break;
                    case 0x56: /* fcmped, V9 %fcc */
                        cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                        cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                        gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                        break;
                    case 0x57: /* fcmpeq, V9 %fcc */
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rs1));
                        gen_op_load_fpr_QT1(QFPREG(rs2));
                        gen_op_fcmpeq(rd & 3);
                        break;
                    default:
                        goto illegal_insn;
                }
            } else if (xop == 0x36) {
#ifdef TARGET_SPARC64
                /* VIS */
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                /* All VIS ops trap if the FPU is disabled.  */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
                /*
                 * edge{8,16,32}[l]{cc,n}: gen_edge's integer arguments are
                 * (element size, cc-setting variant, 'l' variant) — the
                 * flags track the cc/n and plain/l suffixes of each case.
                 */
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                /*
                 * array{8,16,32}: all share the array8 helper; the wider
                 * forms just scale the result by the element size (<< 1
                 * for 16-bit, << 2 for 32-bit elements).
                 */
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    /* rs1 + rs2 goes to rd and to GSR.mask (upper 32 bits) */
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                /*
                 * Partitioned FP compares: operate on a 64-bit FP register
                 * pair and deliver the per-element result mask in an
                 * integer register rd.
                 */
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                /* Partitioned multiplies (D = 64-bit FP reg operands).  */
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                /* Pixel pack/merge; the gsr variants also consume cpu_gsr. */
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    /* DDDD: pdist accumulates into the existing rd value */
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                /* Partitioned add/sub; 's' forms act on 32-bit FP regs.  */
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    /* single 32-bit lane: a plain i32 add suffices */
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
                /*
                 * VIS logical ops, opf 0x060..0x07f: the boolean functions
                 * of rs1/rs2.  Even opf values are 64-bit (D), the odd 's'
                 * variants are 32-bit (F); all map onto plain TCG bitops.
                 */
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                /* fandnot1/fornot1: same ops with swapped operand order */
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                /* Not implemented: fall through to the illegal-insn trap. */
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#endif
            } else {
                goto illegal_insn; /* in decodetree */
            }
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = tcg_temp_new();

            /* Effective address: rs1 + (simm13 or rs2).  */
            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            /*
             * Most of the load/store space has already been moved to
             * decodetree; only the FSR load/store forms remain here.
             */
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d ) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                goto illegal_insn;  /* in decodetree */
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                case 0x22:      /* ldqf, load quad fpreg */
                case 0x23:      /* lddf, load double fpreg */
                    g_assert_not_reached();  /* in decodetree */
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    /* rd == 1 encodes the 64-bit V9 ldxfsr form */
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
                        break;
                    }
#endif
                    /* 32-bit ldfsr (also the rd != 1 path on sparc64) */
                    cpu_dst_32 = tcg_temp_new_i32();
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                case 0x26: /* v9 stqf, v8 stdfq */
                case 0x27: /* stdf, store double fpreg */
                    g_assert_not_reached();
                case 0x25: /* stfsr, V9 stxfsr */
                    {
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        /* rd == 1 encodes the 64-bit V9 stxfsr form */
                        if (rd == 1) {
                            tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                               dc->mem_idx, MO_TEUQ | MO_ALIGN);
                            break;
                        }
#endif
                        tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                           dc->mem_idx, MO_TEUL | MO_ALIGN);
                    }
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                goto illegal_insn; /* in decodetree */
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
5564     advance_pc(dc);
5565  jmp_insn:
5566     return;
5567  illegal_insn:
5568     gen_exception(dc, TT_ILL_INSN);
5569     return;
5570  nfpu_insn:
5571     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5572     return;
5573 }
5574 
5575 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5576 {
5577     DisasContext *dc = container_of(dcbase, DisasContext, base);
5578     CPUSPARCState *env = cpu_env(cs);
5579     int bound;
5580 
5581     dc->pc = dc->base.pc_first;
5582     dc->npc = (target_ulong)dc->base.tb->cs_base;
5583     dc->cc_op = CC_OP_DYNAMIC;
5584     dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5585     dc->def = &env->def;
5586     dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5587     dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5588 #ifndef CONFIG_USER_ONLY
5589     dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5590 #endif
5591 #ifdef TARGET_SPARC64
5592     dc->fprs_dirty = 0;
5593     dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5594 #ifndef CONFIG_USER_ONLY
5595     dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5596 #endif
5597 #endif
5598     /*
5599      * if we reach a page boundary, we stop generation so that the
5600      * PC of a TT_TFAULT exception is always in the right page
5601      */
5602     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5603     dc->base.max_insns = MIN(dc->base.max_insns, bound);
5604 }
5605 
/* No additional setup is needed at the start of a TB for this target. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5609 
5610 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5611 {
5612     DisasContext *dc = container_of(dcbase, DisasContext, base);
5613     target_ulong npc = dc->npc;
5614 
5615     if (npc & 3) {
5616         switch (npc) {
5617         case JUMP_PC:
5618             assert(dc->jump_pc[1] == dc->pc + 4);
5619             npc = dc->jump_pc[0] | JUMP_PC;
5620             break;
5621         case DYNAMIC_PC:
5622         case DYNAMIC_PC_LOOKUP:
5623             npc = DYNAMIC_PC;
5624             break;
5625         default:
5626             g_assert_not_reached();
5627         }
5628     }
5629     tcg_gen_insn_start(dc->pc, npc);
5630 }
5631 
5632 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5633 {
5634     DisasContext *dc = container_of(dcbase, DisasContext, base);
5635     CPUSPARCState *env = cpu_env(cs);
5636     unsigned int insn;
5637 
5638     insn = translator_ldl(env, &dc->base, dc->pc);
5639     dc->base.pc_next += 4;
5640 
5641     if (!decode(dc, insn)) {
5642         disas_sparc_legacy(dc, insn);
5643     }
5644 
5645     if (dc->base.is_jmp == DISAS_NORETURN) {
5646         return;
5647     }
5648     if (dc->pc != dc->base.pc_next) {
5649         dc->base.is_jmp = DISAS_TOO_MANY;
5650     }
5651 }
5652 
/*
 * Finish the TB: emit the exit sequence matching how translation ended,
 * then emit the out-of-line landing pads for any delayed exceptions
 * accumulated on dc->delay_excp_list.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is symbolic (low bits non-zero).
         * Materialize any static value into cpu_pc/cpu_npc; a goto_ptr
         * lookup is only allowed when neither side is fully dynamic
         * (DYNAMIC_PC).
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* npc is chosen at runtime between two recorded targets. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the delayed-exception landing pads, then free the list. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Only store npc when it is a real (4-byte aligned) address. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5732 
5733 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5734                                CPUState *cpu, FILE *logfile)
5735 {
5736     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5737     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5738 }
5739 
/* Hooks wiring the SPARC front end into the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5748 
5749 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5750                            target_ulong pc, void *host_pc)
5751 {
5752     DisasContext dc = {};
5753 
5754     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5755 }
5756 
/*
 * Create the TCG globals that mirror CPUSPARCState fields: control
 * registers, the %g registers, the window-relative registers accessed
 * through regwptr, and the FP registers (as 64-bit pairs).
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* One name per 64-bit register pair, hence only even numbers.  */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals backed by CPUSPARCState fields.  */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    /* target_ulong-sized globals backed by CPUSPARCState fields.  */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is hardwired to zero and gets no backing global.  */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers are addressed relative to regwptr.  */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
5829 
5830 void sparc_restore_state_to_opc(CPUState *cs,
5831                                 const TranslationBlock *tb,
5832                                 const uint64_t *data)
5833 {
5834     SPARCCPU *cpu = SPARC_CPU(cs);
5835     CPUSPARCState *env = &cpu->env;
5836     target_ulong pc = data[0];
5837     target_ulong npc = data[1];
5838 
5839     env->pc = pc;
5840     if (npc == DYNAMIC_PC) {
5841         /* dynamic NPC: already stored */
5842     } else if (npc & JUMP_PC) {
5843         /* jump PC: use 'cond' and the jump targets of the translation */
5844         if (env->cond) {
5845             env->npc = npc & ~3;
5846         } else {
5847             env->npc = pc + 4;
5848         }
5849     } else {
5850         env->npc = npc;
5851     }
5852 }
5853