xref: /openbmc/qemu/target/sparc/translate.c (revision 3a6b8de3)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rett(E)                     qemu_build_not_reached()
41 # define gen_helper_power_down(E)               qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
45 # define gen_helper_done(E)                     qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_fabsq                ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fnegq                ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
90 # define FSR_LDXFSR_MASK                        0
91 # define FSR_LDXFSR_OLDMASK                     0
92 # define MAXTL_MASK                             0
93 #endif
94 
95 /* Dynamic PC, must exit to main loop. */
96 #define DYNAMIC_PC         1
97 /* Dynamic PC, one of two values according to jump_pc[T2]. */
98 #define JUMP_PC            2
99 /* Dynamic PC, may lookup next TB. */
100 #define DYNAMIC_PC_LOOKUP  3
101 
102 #define DISAS_EXIT  DISAS_TARGET_0
103 
104 /* global register indexes */
105 static TCGv_ptr cpu_regwptr;
106 static TCGv cpu_fsr, cpu_pc, cpu_npc;
107 static TCGv cpu_regs[32];
108 static TCGv cpu_y;
109 static TCGv cpu_tbr;
110 static TCGv cpu_cond;
111 static TCGv cpu_cc_N;
112 static TCGv cpu_cc_V;
113 static TCGv cpu_icc_Z;
114 static TCGv cpu_icc_C;
115 #ifdef TARGET_SPARC64
116 static TCGv cpu_xcc_Z;
117 static TCGv cpu_xcc_C;
118 static TCGv_i32 cpu_fprs;
119 static TCGv cpu_gsr;
120 #else
121 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
122 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
123 #endif
124 
125 #ifdef TARGET_SPARC64
126 #define cpu_cc_Z  cpu_xcc_Z
127 #define cpu_cc_C  cpu_xcc_C
128 #else
129 #define cpu_cc_Z  cpu_icc_Z
130 #define cpu_cc_C  cpu_icc_C
131 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
132 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
133 #endif
134 
135 /* Floating point registers */
136 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
137 
138 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
139 #ifdef TARGET_SPARC64
140 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
141 # define env64_field_offsetof(X)  env_field_offsetof(X)
142 #else
143 # define env32_field_offsetof(X)  env_field_offsetof(X)
144 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
145 #endif
146 
147 typedef struct DisasCompare {
148     TCGCond cond;
149     TCGv c1;
150     int c2;
151 } DisasCompare;
152 
153 typedef struct DisasDelayException {
154     struct DisasDelayException *next;
155     TCGLabel *lab;
156     TCGv_i32 excp;
157     /* Saved state at parent insn. */
158     target_ulong pc;
159     target_ulong npc;
160 } DisasDelayException;
161 
162 typedef struct DisasContext {
163     DisasContextBase base;
164     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
165     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
166 
167     /* Used when JUMP_PC value is used. */
168     DisasCompare jump;
169     target_ulong jump_pc[2];
170 
171     int mem_idx;
172     bool cpu_cond_live;
173     bool fpu_enabled;
174     bool address_mask_32bit;
175 #ifndef CONFIG_USER_ONLY
176     bool supervisor;
177 #ifdef TARGET_SPARC64
178     bool hypervisor;
179 #endif
180 #endif
181 
182     sparc_def_t *def;
183 #ifdef TARGET_SPARC64
184     int fprs_dirty;
185     int asi;
186 #endif
187     DisasDelayException *delay_excp_list;
188 } DisasContext;
189 
190 // This function uses non-native bit order
191 #define GET_FIELD(X, FROM, TO)                                  \
192     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
193 
194 // This function uses the order in the manuals, i.e. bit 0 is 2^0
195 #define GET_FIELD_SP(X, FROM, TO)               \
196     GET_FIELD(X, 31 - (TO), 31 - (FROM))
197 
198 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
199 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
200 
201 #ifdef TARGET_SPARC64
202 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
203 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
204 #else
205 #define DFPREG(r) (r & 0x1e)
206 #define QFPREG(r) (r & 0x1c)
207 #endif
208 
209 #define UA2005_HTRAP_MASK 0xff
210 #define V8_TRAP_MASK 0x7f
211 
212 #define IS_IMM (insn & (1<<13))
213 
/*
 * Mark the half of the FP register file containing rd as dirty in FPRS.
 * rd < 32 selects bit 0, otherwise bit 1 (lower/upper register halves).
 * No-op on 32-bit sparc, which has no FPRS register.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
226 
227 /* floating point registers moves */
/*
 * Return a fresh i32 temp holding single-precision register %f<src>.
 * Two single registers are packed into each cpu_fpr[] i64: the even
 * register lives in the high half, the odd one in the low half.
 */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        /* Odd register: low 32 bits of the backing i64. */
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        /* Even register: high 32 bits of the backing i64. */
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
238 
/*
 * Store v into single-precision register %f<dst>, depositing it into the
 * correct half of the backing i64 (odd -> low half, even -> high half)
 * and marking the register file dirty.
 */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}
248 
249 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
250 {
251     return tcg_temp_new_i32();
252 }
253 
254 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
255 {
256     src = DFPREG(src);
257     return cpu_fpr[src / 2];
258 }
259 
/*
 * Store v into double-precision register %d<dst> and mark the register
 * file dirty for FPRS tracking.
 */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
266 
267 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
268 {
269     return cpu_fpr[DFPREG(dst) / 2];
270 }
271 
/* Copy quad register pair starting at %f<src> into env scratch qt0. */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
279 
/* Copy quad register pair starting at %f<src> into env scratch qt1. */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
287 
/* Copy env scratch qt0 back into the quad register pair at %f<dst>. */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
295 
296 /* moves */
297 #ifdef CONFIG_USER_ONLY
298 #define supervisor(dc) 0
299 #define hypervisor(dc) 0
300 #else
301 #ifdef TARGET_SPARC64
302 #define hypervisor(dc) (dc->hypervisor)
303 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
304 #else
305 #define supervisor(dc) (dc->supervisor)
306 #define hypervisor(dc) 0
307 #endif
308 #endif
309 
310 #if !defined(TARGET_SPARC64)
311 # define AM_CHECK(dc)  false
312 #elif defined(TARGET_ABI32)
313 # define AM_CHECK(dc)  true
314 #elif defined(CONFIG_USER_ONLY)
315 # define AM_CHECK(dc)  false
316 #else
317 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
318 #endif
319 
/*
 * Truncate addr to 32 bits in-place when PSTATE.AM-style masking
 * applies (see AM_CHECK above); no-op otherwise.
 */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
326 
327 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
328 {
329     return AM_CHECK(dc) ? (uint32_t)addr : addr;
330 }
331 
332 static TCGv gen_load_gpr(DisasContext *dc, int reg)
333 {
334     if (reg > 0) {
335         assert(reg < 32);
336         return cpu_regs[reg];
337     } else {
338         TCGv t = tcg_temp_new();
339         tcg_gen_movi_tl(t, 0);
340         return t;
341     }
342 }
343 
344 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
345 {
346     if (reg > 0) {
347         assert(reg < 32);
348         tcg_gen_mov_tl(cpu_regs[reg], v);
349     }
350 }
351 
352 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
353 {
354     if (reg > 0) {
355         assert(reg < 32);
356         return cpu_regs[reg];
357     } else {
358         return tcg_temp_new();
359     }
360 }
361 
362 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
363 {
364     return translator_use_goto_tb(&s->base, pc) &&
365            translator_use_goto_tb(&s->base, npc);
366 }
367 
/*
 * End the TB, transferring control to (pc, npc): chained direct jump
 * when both targets allow it, otherwise an indirect TB lookup.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
384 
/*
 * Return the 32-bit carry flag as a 0/1 value.  On 64-bit targets the
 * icc carry is cached in bit 32 of cpu_icc_C, so extract it; on 32-bit
 * targets cpu_icc_C already holds 0/1 directly.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
394 
/*
 * Emit dst = src1 + src2 (+ cin if non-NULL) and record all of the lazy
 * condition-code inputs: N/Z hold the raw result, C the unsigned carry,
 * V the signed overflow, plus the icc copies on 64-bit targets.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* V = (result ^ src2) & ~(src1 ^ src2): overflow iff the operands
       agree in sign but the result does not.  Z temporarily holds the
       src1 ^ src2 term. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
419 
/* ADDcc: add without carry-in, updating the condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
424 
/*
 * TADDcc: tagged add.  In addition to the normal add flags, icc.V is
 * set if either operand has non-zero tag bits (low two bits).
 */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V: any set tag bit forces the
       low 32 bits of cc_V non-zero (sign bit included). */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
440 
/* ADDC: add with the 32-bit carry flag, no condition-code update. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}
446 
/* ADDCcc: add with carry-in, updating the condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
451 
/*
 * Emit dst = src1 - src2 (- cin if non-NULL) and record the lazy
 * condition-code inputs, mirroring gen_op_addcc_int for subtraction.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* The double-word subtract leaves 0 or -1 in the high half;
       negate to normalize the borrow into a positive C value. */
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* V = (src1 ^ src2) & (result ^ src1); Z temporarily holds the
       src xor term, as in the add case. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Borrow into bit 32 is result ^ src1 ^ src2. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
473 
/* SUBcc: subtract without borrow-in, updating the condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
478 
/*
 * TSUBcc: tagged subtract.  As TADDcc, icc.V is additionally set if
 * either operand has non-zero tag bits (low two bits).
 */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V: any set tag bit forces the
       low 32 bits of cc_V non-zero (sign bit included). */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
494 
/* SUBC: subtract with the 32-bit carry flag, no condition-code update. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}
500 
/* SUBCcc: subtract with borrow-in, updating the condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
505 
/*
 * MULScc: one step of the 32-bit multiply-step algorithm, using the Y
 * register and the icc N^V bit, then updating the condition codes via
 * the final add.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_andi_tl(t0, cpu_y, 0x1);
    tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    /* Final add also sets the condition codes for the next step. */
    gen_op_addcc(dst, t_src1, t_src2);
}
542 
/*
 * 32x32 -> 64-bit multiply of the truncated operands, signed when
 * sign_ext is set.  The high 32 bits of the product go to %y; dst
 * receives the low half (32-bit target) or the full 64-bit product.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    /* Truncate the operands to 32 bits with the requested extension. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
567 
/* UMUL: unsigned 32x32 multiply, high half to %y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
573 
/* SMUL: signed 32x32 multiply, high half to %y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
579 
/*
 * SDIV via helper (which handles the trap cases); the helper produces
 * a 64-bit value from which the 32-bit quotient is extracted.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
591 
/*
 * UDIVcc: unsigned divide with condition-code update.  The helper packs
 * the quotient in the low 32 bits and the V flag in the high 32 bits of
 * its i64 result; unpack into cc_N/cc_V, clear C, set Z from the result.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* cc_V doubles as the 64-bit scratch for the helper result. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
616 
/*
 * SDIVcc: signed divide with condition-code update; same result packing
 * as gen_op_udivcc but with sign extension of the quotient.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* cc_V doubles as the 64-bit scratch for the helper result. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
641 
/* TADDccTV: tagged add, trapping on overflow -- entirely in the helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
646 
/* TSUBccTV: tagged subtract, trapping on overflow -- entirely in the helper. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
651 
/* POPC: population count of src2 (src1 is unused by the instruction). */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
656 
#ifndef TARGET_SPARC64
/* Stub: the array8 helper exists only on sparc64; never reached here. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
663 
/* ARRAY16: array8 address computation scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
669 
/* ARRAY32: array8 address computation scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
675 
/* VIS FPACK16 via helper; GSR supplies the scale factor.  sparc64 only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
684 
/* VIS FPACKFIX via helper; GSR supplies the scale factor.  sparc64 only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
693 
/* VIS FPACK32 via helper; GSR supplies the scale factor.  sparc64 only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
702 
/*
 * VIS FALIGNDATA: concatenate s1:s2 and extract 8 bytes at the byte
 * offset held in GSR.align (low 3 bits of GSR).  sparc64 only.
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = GSR.align * 8: bit offset into the 128-bit value. */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
729 
/* VIS BSHUFFLE via helper; GSR supplies the byte mask.  sparc64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
738 
// 1
/* Branch-always condition: dst = 1. */
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
744 
// 0
/* Branch-never condition: dst = 0. */
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
750 
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
/* Extract FCC0 of the selected fcc field from src (an FSR value) as 0/1. */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
764 
/* Extract FCC1 of the selected fcc field from src (an FSR value) as 0/1. */
static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
770 
// !0: FCC0 | FCC1
/* FBNE: true (dst = 1) for fcc values 1, 2 or 3. */
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}
779 
// 1 or 2: FCC0 ^ FCC1
/* FBLG: true for fcc values 1 (less) or 2 (greater). */
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}
788 
// 1 or 3: FCC0
/* FBUL: true for fcc values 1 (less) or 3 (unordered). */
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
794 
// 1: FCC0 & !FCC1
/* FBL: true only for fcc value 1 (less). */
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}
803 
// 2 or 3: FCC1
/* FBUG: true for fcc values 2 (greater) or 3 (unordered). */
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
809 
// 2: !FCC0 & FCC1
/* FBG: true only for fcc value 2 (greater). */
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}
818 
// 3: FCC0 & FCC1
/* FBU: true only for fcc value 3 (unordered). */
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}
827 
// 0: !(FCC0 | FCC1)
/* FBE: true only for fcc value 0 (equal). */
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
837 
// 0 or 3: !(FCC0 ^ FCC1)
/* FBUE: true for fcc values 0 (equal) or 3 (unordered). */
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
847 
// 0 or 2: !FCC0
/* FBGE: true for fcc values 0 (equal) or 2 (greater). */
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
854 
// !1: !(FCC0 & !FCC1)
/* FBUGE: true for every fcc value except 1 (less). */
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
864 
// 0 or 1: !FCC1
/* FBLE: true for fcc values 0 (equal) or 1 (less). */
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
871 
// !2: !(!FCC0 & FCC1)
/* FBULE: true for every fcc value except 2 (greater). */
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
881 
// !3: !(FCC0 & FCC1)
/* FBO: true for every fcc value except 3 (ordered operands). */
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
891 
/* Discard cpu_cond once no exception path can still observe it. */
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
904 
/*
 * Materialize the pending conditional branch: select between the two
 * recorded jump targets into cpu_npc using the saved comparison.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
913 
/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        /* Resolve the pending branch so npc becomes a plain dynamic value. */
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
923 
/*
 * Make cpu_npc valid.  Low bits of dc->npc encode the dynamic states
 * (DYNAMIC_PC, JUMP_PC, DYNAMIC_PC_LOOKUP); otherwise npc is a known
 * constant to store directly.
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the runtime value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
942 
/* Synchronize cpu_pc and cpu_npc with the translator's view. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
948 
/* Raise trap `which` at the current pc/npc and end the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
956 
957 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
958 {
959     DisasDelayException *e = g_new0(DisasDelayException, 1);
960 
961     e->next = dc->delay_excp_list;
962     dc->delay_excp_list = e;
963 
964     e->lab = gen_new_label();
965     e->excp = excp;
966     e->pc = dc->pc;
967     /* Caller must have used flush_cond before branch. */
968     assert(e->npc != JUMP_PC);
969     e->npc = dc->npc;
970 
971     return e->lab;
972 }
973 
974 static TCGLabel *delay_exception(DisasContext *dc, int excp)
975 {
976     return delay_exceptionv(dc, tcg_constant_i32(excp));
977 }
978 
/*
 * Emit an alignment check: branch to a deferred TT_UNALIGNED raise
 * if any of the bits in mask are set in addr.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    /* flush_cond is required before delay_exception (see its assert). */
    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
990 
/*
 * Advance pc to npc (the delay-slot step), handling the dynamic npc
 * encodings: a pending JUMP_PC is resolved first, dynamic values are
 * copied through cpu_npc, constants are tracked statically.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
1014 
/*
 * Set up *CMP to evaluate integer condition code COND: a TCG comparison
 * of cmp->c1 against the constant cmp->c2 (0), computed from the live
 * N/Z/V/C flag variables.  XCC selects the 64-bit condition codes;
 * otherwise the 32-bit icc view is used.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* The 32-bit carry lives in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    /* Conditions 8..15 are the negations of conditions 0..7. */
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1113 
/*
 * Set up *CMP to evaluate floating-point condition COND on FSR field
 * fcc[CC].  The gen_op_eval_* helpers produce a boolean in r_dst, so
 * the final comparison is simply r_dst != 0.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = 0;

    /* Bit offset of fccN relative to fcc0 (FSR bit 10):
       fcc1/2/3 (v9 only) live at FSR bits 32/34/36. */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1191 
/*
 * Map a SPARC register-condition field to a TCG condition.
 * Inverted logic: gen_compare_reg applies tcg_invert_cond to these,
 * so each entry is the negation of the architectural condition.
 */
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1203 
1204 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1205 {
1206     cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1207     cmp->c1 = tcg_temp_new();
1208     cmp->c2 = 0;
1209     tcg_gen_mov_tl(cmp->c1, r_src);
1210 }
1211 
/* Clear the FSR ftt field and the current (cexc) IEEE exception bits. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1216 
/* FMOVS: single-precision register move; clears ftt/cexc like real FP ops. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1222 
/* FNEGS: single-precision negate, after clearing ftt/cexc. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}
1228 
/* FABSS: single-precision absolute value, after clearing ftt/cexc. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}
1234 
/* FMOVD: double-precision register move; clears ftt/cexc like real FP ops. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1240 
/* FNEGD (sparc64 only): double-precision negate, after clearing ftt/cexc. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}
1246 
/* FABSD (sparc64 only): double-precision absolute value, after clearing ftt/cexc. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}
1252 
1253 #ifdef TARGET_SPARC64
/* FCMPs: single-precision compare into FSR field fcc[FCCNO] (v9 has four). */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1271 
/* FCMPd: double-precision compare into FSR field fcc[FCCNO]. */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1289 
/* FCMPq: quad compare into fcc[FCCNO]; operands come from env (QT0/QT1). */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1307 
/* FCMPEs: single compare into fcc[FCCNO]; signals on unordered operands. */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1325 
/* FCMPEd: double compare into fcc[FCCNO]; signals on unordered operands. */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1343 
/* FCMPEq: quad compare into fcc[FCCNO]; signals on unordered operands. */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1361 
1362 #else
1363 
/* Pre-v9 has a single fcc field, so fccno is ignored in these wrappers. */
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1368 
/* Double compare, single fcc field (fccno unused pre-v9). */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1373 
/* Quad compare, single fcc field (fccno unused pre-v9). */
static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}
1378 
/* Signaling single compare, single fcc field (fccno unused pre-v9). */
static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1383 
/* Signaling double compare, single fcc field (fccno unused pre-v9). */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1388 
/* Signaling quad compare, single fcc field (fccno unused pre-v9). */
static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
1393 #endif
1394 
/*
 * Raise TT_FP_EXCP with FSR.ftt replaced by FSR_FLAGS: clear the old
 * ftt field, OR in the new value, then trap (ends the TB).
 */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1401 
/*
 * If the FPU is disabled (system emulation only), raise TT_NFPU_INSN
 * and return 1 so the caller abandons the instruction; return 0 when
 * FP access is allowed.  User-mode always allows FP.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1412 
1413 /* asi moves */
typedef enum {
    GET_ASI_HELPER,  /* go out of line via gen_helper_{ld,st}_asi */
    GET_ASI_EXCP,    /* exception already generated; emit no access */
    GET_ASI_DIRECT,  /* plain qemu_ld/st with the resolved mem_idx */
    GET_ASI_DTWINX,  /* 128-bit twin load/store */
    GET_ASI_BLOCK,   /* 64-byte FP block load/store */
    GET_ASI_SHORT,   /* 8/16-bit FP load/store (FL8/FL16 asis) */
    GET_ASI_BCOPY,   /* sparc32 ASI_M_BCOPY 32-byte copy */
    GET_ASI_BFILL,   /* sparc32 ASI_M_BFILL block fill */
} ASIType;
1424 
typedef struct {
    ASIType type;  /* dispatch category chosen by resolve_asi() */
    int asi;       /* resolved ASI number */
    int mem_idx;   /* MMU index to use for the access */
    MemOp memop;   /* size/sign/endianness (LE asis toggle MO_BSWAP) */
} DisasASI;
1431 
/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:   /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* Immediate-offset form: take the asi from the %asi register. */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: select the MMU index from the asi. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second switch: select the access type. */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1655 
1656 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/* Stub for sparc32 user-only: GET_ASI_HELPER is never produced in this
   configuration, so this must be unreachable; it exists only to link. */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1662 
/* Stub for sparc32 user-only; see gen_helper_ld_asi above. */
static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1668 #endif
1669 
/*
 * Integer load with ASI: either inline (GET_ASI_DIRECT) or via the
 * ld_asi helper.  GET_ASI_DTWINX is invalid for plain loads and raises
 * TT_ILL_INSN; GET_ASI_EXCP means an exception was already emitted.
 */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may fault: make pc/npc visible first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns i64; narrow to the 32-bit target reg. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1700 
/*
 * Integer store with ASI.  Direct stores go inline; TWINX is allowed
 * only on 64-bit HYPV-capable cpus (where it acts as ST_BLKINIT_),
 * BCOPY performs the sparc32 32-byte block copy, and everything else
 * goes through the st_asi helper (which ends the TB, since a store to
 * a TLB control register can change the page mappings).
 */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may fault: make pc/npc visible first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1772 
/*
 * SWAP with ASI: atomic exchange of SRC with memory at ADDR, old value
 * into DST.  Only the direct case is implemented; other asis raise a
 * data-access trap.
 */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1789 
/*
 * CASA/CASXA: atomic compare-and-swap of NEWV against CMPV at ADDR,
 * previous memory value into OLDV.  Only the direct case is supported;
 * other asis raise a data-access trap.
 */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1806 
/*
 * LDSTUB with ASI: atomically load the byte at ADDR into DST and store
 * 0xff.  The direct case is a real atomic xchg; the helper fallback is
 * a non-atomic ld+st pair, so under CF_PARALLEL we punt to the
 * exclusive-execution path instead.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may fault: make pc/npc visible first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1841 
/*
 * FP load with ASI (ldfa/lddfa/ldqfa) into FP register RD.
 * ORIG_SIZE is the architectural access size; 128-bit accesses are
 * currently split into two 64-bit loads.  Block and short asis are
 * only valid for the 64-bit form.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load the first half into a temp so that a fault on the
               second half leaves cpu_fpr[rd / 2] unmodified. */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
1953 
1954 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1955                         TCGv addr, int rd)
1956 {
1957     MemOp memop = da->memop;
1958     MemOp size = memop & MO_SIZE;
1959     TCGv_i32 d32;
1960     TCGv addr_tmp;
1961 
1962     /* TODO: Use 128-bit load/store below. */
1963     if (size == MO_128) {
1964         memop = (memop & ~MO_SIZE) | MO_64;
1965     }
1966 
1967     switch (da->type) {
1968     case GET_ASI_EXCP:
1969         break;
1970 
1971     case GET_ASI_DIRECT:
1972         memop |= MO_ALIGN_4;
1973         switch (size) {
1974         case MO_32:
1975             d32 = gen_load_fpr_F(dc, rd);
1976             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
1977             break;
1978         case MO_64:
1979             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1980                                 memop | MO_ALIGN_4);
1981             break;
1982         case MO_128:
1983             /* Only 4-byte alignment required.  However, it is legal for the
1984                cpu to signal the alignment fault, and the OS trap handler is
1985                required to fix it up.  Requiring 16-byte alignment here avoids
1986                having to probe the second page before performing the first
1987                write.  */
1988             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1989                                 memop | MO_ALIGN_16);
1990             addr_tmp = tcg_temp_new();
1991             tcg_gen_addi_tl(addr_tmp, addr, 8);
1992             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1993             break;
1994         default:
1995             g_assert_not_reached();
1996         }
1997         break;
1998 
1999     case GET_ASI_BLOCK:
2000         /* Valid for stdfa on aligned registers only.  */
2001         if (orig_size == MO_64 && (rd & 7) == 0) {
2002             /* The first operation checks required alignment.  */
2003             addr_tmp = tcg_temp_new();
2004             for (int i = 0; ; ++i) {
2005                 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2006                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2007                 if (i == 7) {
2008                     break;
2009                 }
2010                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2011                 addr = addr_tmp;
2012             }
2013         } else {
2014             gen_exception(dc, TT_ILL_INSN);
2015         }
2016         break;
2017 
2018     case GET_ASI_SHORT:
2019         /* Valid for stdfa only.  */
2020         if (orig_size == MO_64) {
2021             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2022                                 memop | MO_ALIGN);
2023         } else {
2024             gen_exception(dc, TT_ILL_INSN);
2025         }
2026         break;
2027 
2028     default:
2029         /* According to the table in the UA2011 manual, the only
2030            other asis that are valid for ldfa/lddfa/ldqfa are
2031            the PST* asis, which aren't currently handled.  */
2032         gen_exception(dc, TT_ILL_INSN);
2033         break;
2034     }
2035 }
2036 
/*
 * LDDA: load a doubleword from an alternate space into the even/odd
 * register pair rd/rd+1.  The ASI has already been decoded into *da.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised during ASI decode; write nothing back. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* The helper may fault: make pc/npc visible first. */
            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2112 
/*
 * STDA: store the even/odd register pair rd/rd+1 as a doubleword to an
 * alternate space.  The ASI has already been decoded into *da.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised during ASI decode. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_concat_tl_i64(t64, lo, hi);
            /* Round the address down to an 8-byte boundary. */
            tcg_gen_andi_tl(d_addr, addr, -8);
            /* Replicate the doubleword into four consecutive slots. */
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            /* The helper may fault: make pc/npc visible first. */
            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2204 
/*
 * FMOVScc: conditionally move a single-precision FP register.
 * The comparison has already been evaluated into *cmp.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? rs : rd, i.e. move only when the condition held. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

/*
 * FMOVDcc: conditionally move a double-precision FP register.
 */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

/*
 * FMOVQcc: conditionally move a quad-precision FP register, handled as
 * two conditional 64-bit moves over the aligned register pair.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}
2261 
#ifdef TARGET_SPARC64
/*
 * Compute a host pointer to the trap_state entry for the current
 * trap level:  r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif

/* Decodetree !function helper: map an insn field to a double FP regno. */
static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}

/* Decodetree !function helper: map an insn field to a quad FP regno. */
static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}
2295 
2296 /* Include the auto-generated decoder.  */
2297 #include "decode-insns.c.inc"
2298 
/*
 * TRANS expands to the decodetree entry point trans_NAME, gating the
 * real translation function FUNC on the avail_AVAIL predicate.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates.  Where a feature is fixed by the build
 * (sparc32 vs sparc64) the predicate folds to a compile-time constant;
 * otherwise it tests the per-cpu feature bits.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2329 
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    /* Low bits set in npc mean a symbolic (non-static) value. */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime: advance in generated code. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are static: advance at translation time. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2370 
2371 /*
2372  * Major opcodes 00 and 01 -- branches, call, and sethi
2373  */
2374 
/*
 * Advance pc/npc for a conditional branch to pc + disp*4, with the
 * comparison already evaluated into *cmp.  ANNUL requests annulment of
 * the delay slot when the branch is not taken.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    /* Unconditional branch: both successors are static. */
    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul skips the delay slot entirely. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    /* Branch-never: fall through, optionally annulling the delay slot. */
    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc is symbolic: advance in generated code. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional: the delay slot runs only when taken,
           so the TB must end here with a two-way static jump. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Select the branch target with a movcond at runtime. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Record the pending condition; resolved when the delay
               slot insn has been translated (see advance_pc). */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2454 
/* Raise a privileged-instruction trap; always consumes the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise an fp-exception trap with FTT = unimplemented FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2466 
2467 static bool gen_trap_float128(DisasContext *dc)
2468 {
2469     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2470         return false;
2471     }
2472     return raise_unimpfpop(dc);
2473 }
2474 
/* Integer condition-code branch (Bicc / BPcc). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)

/* Floating-point condition-code branch (FBfcc / FBPfcc). */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* Trap if the FPU is disabled; the insn is then fully handled. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2499 
2500 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2501 {
2502     DisasCompare cmp;
2503 
2504     if (!avail_64(dc)) {
2505         return false;
2506     }
2507     if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2508         return false;
2509     }
2510 
2511     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2512     return advance_jump_cond(dc, &cmp, a->a, a->i);
2513 }
2514 
/* CALL: save return address in %o7 (r15) and branch pc-relative. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

/* SETHI: load a 22-bit immediate into the high bits of rd. */
static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop.  */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
2547 
2548 /*
2549  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2550  */
2551 
/*
 * Tcc: conditional software trap.  The trap number is rs1 + rs2/imm,
 * masked and biased into the software-trap vector range.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode use a wider trap mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Emit the trap as an out-of-line delayed exception. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2604 
/* Tcc, register form; sparc32 has only the icc condition codes. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Tcc, v7/v8 immediate form (no cc field). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc, v9 immediate form (with cc field). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}

/* STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

/* MEMBAR (v9): barrier with separate ordering and completion masks. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2650 
2651 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2652                           TCGv (*func)(DisasContext *, TCGv))
2653 {
2654     if (!priv) {
2655         return raise_priv(dc);
2656     }
2657     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2658     return advance_pc(dc);
2659 }
2660 
/* RDY: read the Y register (dst unused; return the global directly). */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

/* Leon3 %asr17 processor configuration register. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    uint32_t val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = dc->def->nwindows - 1;   /* [4:0] NWIN */
    val |= 1 << 8;                 /* [8]   V8   */

    return tcg_constant_tl(val);
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2694 
/* RDCCR: condition codes are kept unpacked; repack via helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current ASI is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

/* RDTICK: read the tick counter through the timer helper. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* The counter is an I/O device; end the TB if icount is active. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc of this insn is a translation-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

/* RDFPRS: floating-point register state. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR: graphics status register; requires an enabled FPU. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* RDSOFTINT: pending software interrupt bits. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* RDTICK_CMPR: tick compare register. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* RDSTICK: system tick counter, via the same timer helper as RDTICK. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* The counter is an I/O device; end the TB if icount is active. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RDSTICK_CMPR: system tick compare register. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

/* RDPSR (sparc32): assemble the PSR via helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2814 
/* RDHPR %hpstate: hyperprivileged processor state. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* RDHPR %htstate: entry for the current trap level, htstate[tl]. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    /* Scale by sizeof(htstate[0]) == 8 to index the array. */
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* RDHPR %hintp: hyperprivileged interrupt pending. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* RDHPR %htba: hyperprivileged trap base address. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* RDHPR %hver: hypervisor version register. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* RDHPR %hstick_cmpr: hyperprivileged system tick compare. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2872 
/* RDWIM (sparc32): window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* RDPR %tpc: trap pc from the current trap-level state. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* RDPR %tnpc: trap npc from the current trap-level state. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* RDPR %tstate: saved state from the current trap-level state. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* RDPR %tt: trap type from the current trap-level state. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2941 
/* %tbr / %tba is kept in a global TCGv; no load needed, DST is unused. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* Read %pstate (32-bit env field, sign-extended into DST). */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2957 
/* Read the current trap level %tl. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* Read %pil; psrpil is shared between sparc32 and sparc64 env layouts. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* Read %cwp via helper (the cwp encoding is not a simple env load). */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
2981 
/*
 * The v9 register-window state registers (%cansave, %canrestore,
 * %cleanwin, %otherwin, %wstate, %gl) are simple 32-bit env fields;
 * each reader below is a plain sign-extending load.
 */

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3030 
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

/* Hypervisor-only: strand status is gated on hypervisor(dc), not supervisor. */
TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* Read the (constant at runtime) %ver register from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3047 
3048 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3049 {
3050     if (avail_64(dc)) {
3051         gen_helper_flushw(tcg_env);
3052         return advance_pc(dc);
3053     }
3054     return false;
3055 }
3056 
/*
 * Common translation for writes to special registers (WRASR/WRPR/WRPSR...).
 * Per the architecture, the value written is rs1 ^ (rs2 or simm13);
 * FUNC receives that xor result and performs the actual register write.
 * Returns false (illegal insn) for an under-decoded rs2 field, or emits
 * a privilege trap when PRIV is false.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* %g0 ^ constant folds to the constant itself. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with 0 is the identity; pass rs1 through. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3088 
/* WRY: %y holds only 32 significant bits; zero-extend on sparc64. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* WRCCR: unpack the ccr bits into the cc state via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* WRASI: store the low 8 bits as the new default %asi. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3114 
/*
 * WRFPRS: update %fprs.  The cached dirty state in the DisasContext is
 * invalidated and the TB ended so later FP insns re-check fprs.
 */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* WRGSR: writing %gsr requires the FPU to be enabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3135 
/* The three softint ASRs (set / clear / write) all go through helpers,
   since changing softint may need to raise or retract an interrupt. */

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3156 
/*
 * WRTICK_CMPR: store the new compare value, then reprogram the timer.
 * translator_io_start() is required before touching the timer so icount
 * accounting is correct.
 */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* WRSTICK: set the current %stick counter value. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* WRSTICK_CMPR: like WRTICK_CMPR but for the %stick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3201 
/*
 * Power-down ASR write: the helper halts the cpu, so pc/npc must be
 * synced to env (finishing_insn + save_state) before it runs.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/* WRPSR (sparc32): helper validates and unpacks psr; end the TB since
   interrupt enable / cwp state may have changed. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3218 
3219 static void do_wrwim(DisasContext *dc, TCGv src)
3220 {
3221     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3222     TCGv tmp = tcg_temp_new();
3223 
3224     tcg_gen_andi_tl(tmp, src, mask);
3225     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3226 }
3227 
3228 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3229 
/* Writers for the v9 trap-state registers; each stores into the
   trap_state entry for the current trap level. */

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* %tt is a 32-bit field; only the low 32 bits of SRC are stored. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3285 
/* WRPR %tick: set the counter; io_start is needed for icount mode. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* %tba lives in the cpu_tbr global. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3305 
/*
 * WRPR %pstate: the helper may change interrupt/privilege state, so pc
 * state is saved first and npc marked dynamic; in icount mode the TB
 * must end as well.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* WRPR %tl: changing the trap level switches trap state; npc becomes
   dynamic after the saved state. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* WRPR %pil: may unmask a pending interrupt, hence the helper and the
   icount-mode TB exit. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3336 
/* WRPR %cwp goes through a helper (window pointer changes remap the
   register file); the remaining window-state registers are plain
   32-bit env stores. */

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3378 
/* WRPR %gl: global-level changes switch global register sets, so a
   helper is needed. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* Sparc32 WRTBR shares the %tba writer above. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3395 
/* WRHPR %hpstate: end the TB since hypervisor state affects translation. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/*
 * WRHPR %htstate: store into htstate indexed by the current trap level.
 * The index is (tl & MAXTL_MASK) << 3, i.e. scaled by 8 -- assumes each
 * htstate entry is 8 bytes (NOTE(review): verify against CPUSPARCState).
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* WRHPR %hstick_cmpr: same pattern as the other tick-compare writers. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3448 
/* SAVED/RESTORED (v9): adjust the window bookkeeping via helpers;
   privileged instructions. */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3464 
/* NOP: nothing to emit; just step past the insn. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3476 
/*
 * Common translation for two-operand arithmetic/logic insns.
 * FUNC is the reg-reg generator, FUNCI the reg-imm variant (may be NULL,
 * in which case the immediate is materialized as a constant).
 * With LOGIC_CC, the result is computed directly into cpu_cc_N and the
 * remaining flags are derived from it (Z = result, C = V = 0).
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute straight into cpu_cc_N; flags fall out of the result. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Mirror the result into the 32-bit icc flags as well. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3519 
3520 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3521                      void (*func)(TCGv, TCGv, TCGv),
3522                      void (*funci)(TCGv, TCGv, target_long),
3523                      void (*func_cc)(TCGv, TCGv, TCGv))
3524 {
3525     if (a->cc) {
3526         return do_arith_int(dc, a, func_cc, NULL, false);
3527     }
3528     return do_arith_int(dc, a, func, funci, false);
3529 }
3530 
/* Logic insns compute flags from the result itself, so the same
   generator serves both cc and non-cc forms. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3564 
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov constant (or %g0, giving 0). */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov register. */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    /* General case: a normal logic insn. */
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3581 
/*
 * UDIV (sparc32): divide the 64-bit value %y:rs1 by the 32-bit rs2,
 * saturating the quotient at UINT32_MAX (V9 overflow-on-divide rule).
 * A zero divisor raises TT_DIV_ZERO -- immediately when the divisor is
 * a known-zero immediate, otherwise via a delayed exception checked at
 * runtime.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        /* The delayed exception needs pc/npc state flushed first. */
        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Build the 64-bit dividend: rs1 in the low half, %y in the high. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate the quotient to 32 bits. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3634 
/*
 * UDIVX (v9): straightforward 64-bit unsigned divide.  A zero divisor
 * raises TT_DIV_ZERO, either immediately (zero immediate) or via a
 * delayed runtime check.
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        /* The delayed exception needs pc/npc state flushed first. */
        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3672 
/*
 * SDIVX (v9): 64-bit signed divide.  Besides the zero-divisor trap,
 * the INT64_MIN / -1 case must be special-cased because that quotient
 * overflows the host divide instruction (it traps on x86).
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x, and INT64_MIN / -1 == INT64_MIN on sparc. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        /* The delayed exception needs pc/npc state flushed first. */
        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3729 
3730 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3731                      int width, bool cc, bool left)
3732 {
3733     TCGv dst, s1, s2, lo1, lo2;
3734     uint64_t amask, tabl, tabr;
3735     int shift, imask, omask;
3736 
3737     dst = gen_dest_gpr(dc, a->rd);
3738     s1 = gen_load_gpr(dc, a->rs1);
3739     s2 = gen_load_gpr(dc, a->rs2);
3740 
3741     if (cc) {
3742         gen_op_subcc(cpu_cc_N, s1, s2);
3743     }
3744 
3745     /*
3746      * Theory of operation: there are two tables, left and right (not to
3747      * be confused with the left and right versions of the opcode).  These
3748      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3749      * these tables are loaded into two constants, TABL and TABR below.
3750      * The operation index = (input & imask) << shift calculates the index
3751      * into the constant, while val = (table >> index) & omask calculates
3752      * the value we're looking for.
3753      */
3754     switch (width) {
3755     case 8:
3756         imask = 0x7;
3757         shift = 3;
3758         omask = 0xff;
3759         if (left) {
3760             tabl = 0x80c0e0f0f8fcfeffULL;
3761             tabr = 0xff7f3f1f0f070301ULL;
3762         } else {
3763             tabl = 0x0103070f1f3f7fffULL;
3764             tabr = 0xfffefcf8f0e0c080ULL;
3765         }
3766         break;
3767     case 16:
3768         imask = 0x6;
3769         shift = 1;
3770         omask = 0xf;
3771         if (left) {
3772             tabl = 0x8cef;
3773             tabr = 0xf731;
3774         } else {
3775             tabl = 0x137f;
3776             tabr = 0xfec8;
3777         }
3778         break;
3779     case 32:
3780         imask = 0x4;
3781         shift = 0;
3782         omask = 0x3;
3783         if (left) {
3784             tabl = (2 << 2) | 3;
3785             tabr = (3 << 2) | 1;
3786         } else {
3787             tabl = (1 << 2) | 3;
3788             tabr = (3 << 2) | 2;
3789         }
3790         break;
3791     default:
3792         abort();
3793     }
3794 
3795     lo1 = tcg_temp_new();
3796     lo2 = tcg_temp_new();
3797     tcg_gen_andi_tl(lo1, s1, imask);
3798     tcg_gen_andi_tl(lo2, s2, imask);
3799     tcg_gen_shli_tl(lo1, lo1, shift);
3800     tcg_gen_shli_tl(lo2, lo2, shift);
3801 
3802     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3803     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3804     tcg_gen_andi_tl(lo1, lo1, omask);
3805     tcg_gen_andi_tl(lo2, lo2, omask);
3806 
3807     amask = address_mask_i(dc, -8);
3808     tcg_gen_andi_tl(s1, s1, amask);
3809     tcg_gen_andi_tl(s2, s2, amask);
3810 
3811     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3812     tcg_gen_and_tl(lo2, lo2, lo1);
3813     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3814 
3815     gen_store_gpr(dc, a->rd, dst);
3816     return advance_pc(dc);
3817 }
3818 
3819 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3820 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3821 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3822 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3823 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3824 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3825 
3826 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3827 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3828 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3829 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3830 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3831 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3832 
/* Generic reg = FUNC(reg, reg) translation for three-register insns. */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3848 
/*
 * ALIGNADDR: dst = (s1 + s2) & ~7; the low 3 bits of the sum are
 * saved in GSR.align for use by FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/* ALIGNADDRL: as above, but GSR.align gets the negated low bits. */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/* BMASK: dst = s1 + s2, with the sum also stored in GSR.mask (bits 32-63). */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3890 
/*
 * Register-count shifts.  L selects shift-left, U selects unsigned
 * (logical) right; otherwise arithmetic right.  a->x selects the
 * 64-bit form; the count is masked to 5 or 6 bits accordingly.
 * Note the 32-bit extensions are written into DST, never into SRC1,
 * because SRC1 may alias a guest register global.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit result: discard the high bits. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so the logical shift sees 32 bits. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first for the arithmetic shift. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3930 
/*
 * Immediate-count shifts.  Same L/U selection as do_shift_r.  On a
 * 64-bit cpu doing a 32-bit shift, deposit/extract fuse the shift with
 * the 32-bit extension in a single op.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift: plain shift ops suffice. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 64-bit cpu, 32-bit shift: shift + 32-bit extension in one op. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3967 
/*
 * Return a TCGv for the second operand: either the simm value (imm)
 * or register rs2.  Returns NULL when the under-decoded rs2 field has
 * reserved high bits set; the caller must then reject the insn.
 */
static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        /* Immediates and %g0 (always zero) both become constants. */
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}
3980 
/*
 * Conditional move: rd = (cmp holds) ? src2 : rd.
 * The current rd value is loaded first so that a false condition
 * leaves the register unchanged.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
3990 
/* MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        /* Reserved bits set in the rs2 field: illegal encoding. */
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4002 
/* MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        /* Reserved bits set in the rs2 field: illegal encoding. */
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4014 
/* MOVR: conditional move on the contents of register rs1. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        /* Reserved bits set in the rs2 field: illegal encoding. */
        return false;
    }
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4026 
/*
 * Common helper for insns that compute rs1 + (rs2 | simm) and hand
 * the sum to a continuation (JMPL, RETT, RETURN, SAVE, RESTORE).
 * Returns false for an illegal rs2 encoding; otherwise returns the
 * continuation's result.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        /* Immediate form, or rs2 == %g0 which reads as zero. */
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4051 
/*
 * JMPL continuation: jump to src, writing the address of the JMPL
 * insn itself into rd.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    /* Target must be 4-byte aligned; otherwise trap. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    /* Indirect target: end the TB and look up the next one. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4072 
/*
 * RETT continuation (sparc32 only): return from trap to src.
 * Privileged; the state restore is done by the rett helper.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    /* Target must be 4-byte aligned; otherwise trap. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4090 
/*
 * RETURN continuation (sparc64 only): jump to src and restore the
 * caller's register window.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    /* Target must be 4-byte aligned; otherwise trap. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    /* Indirect target: end the TB and look up the next one. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4105 
/*
 * SAVE continuation: advance to a new register window, then store the
 * pre-computed sum into rd of the *new* window (hence the temp in
 * do_add_special).
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4114 
/*
 * RESTORE continuation: return to the previous register window, then
 * store the pre-computed sum into rd of the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4123 
/*
 * DONE / RETRY (sparc64, privileged): return from a trap handler.
 * The helpers reload pc/npc from the trap state, so translation of
 * this TB ends with a dynamic pc.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* The helpers may touch timekeeping state; start I/O explicitly. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4142 
4143 /*
4144  * Major opcode 11 -- load and store instructions
4145  */
4146 
/*
 * Compute the effective address rs1 + (rs2 | simm) for a load/store.
 * Returns NULL for an illegal rs2 encoding.  When 32-bit addressing
 * is in effect (AM_CHECK), the result is zero-extended to 32 bits.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Reuse the temp if we already made one; never clobber a gpr. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4175 
/* Integer load into rd, with the given memory operation size/sign. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4199 
/* Integer store of rd, with the given memory operation size. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4219 
/* LDD: load doubleword into an even/odd register pair. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* The destination must be an even-numbered register pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4236 
/* STD: store doubleword from an even/odd register pair. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* The source must be an even-numbered register pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4253 
/* LDSTUB: atomically load a byte into rd and store 0xff to memory. */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4270 
/* SWAP: atomically exchange rd with a 32-bit word in memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4288 
/*
 * CASA/CASXA: compare-and-swap with memory at [rs1] (no displacement
 * in this encoding, hence the fixed zero offset).  c (rs2) is the
 * compare value, n (current rd) the value to store on match, and o
 * receives the prior memory contents into rd.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4310 
/*
 * Floating-point load of the given size (32/64/128 bits) into fp
 * register rd.  Traps if the FPU is disabled, and for 128-bit ops
 * if float128 is not implemented.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4338 
/*
 * Floating-point store of the given size (32/64/128 bits) from fp
 * register rd.  Same trap checks as do_ld_fpr.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4365 
/*
 * STDFQ (sparc32, privileged): store double from FP queue.
 * This implementation has no FP queue, so raise a sequence-error
 * FP exception instead of performing a store.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4380 
/*
 * LDFSR/LDXFSR: load the FSR from memory.  Bits in new_mask come from
 * the loaded value; bits in old_mask are preserved from the current
 * FSR; everything else is cleared.  The helper applies side effects
 * of the new FSR value.
 */
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4402 
/* STFSR/STXFSR: store the FSR to memory (32 or 64 bits). */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4418 
/*
 * FZEROs/FONEs: fill single-precision register rd with all-zero or
 * all-one bits.  Single registers are packed two per i64: an odd rd
 * is the low 32 bits of cpu_fpr[rd/2], an even rd the high 32 bits,
 * so only the relevant half is modified.
 */
static bool do_fc(DisasContext *dc, int rd, bool c)
{
    uint64_t mask;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    if (rd & 1) {
        mask = MAKE_64BIT_MASK(0, 32);
    } else {
        mask = MAKE_64BIT_MASK(32, 32);
    }
    if (c) {
        tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
    } else {
        tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
    }
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4443 
/*
 * FZEROd/FONEd: set double-precision register rd to the constant c
 * (0 or all-ones).
 */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4457 
/*
 * Unary single-precision op with no env access (no IEEE exceptions):
 * rd = func(rs).
 */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4478 
/*
 * Double-to-single narrowing op with no env access:
 * F(rd) = func(D(rs)).  Used by the VIS pack insns.
 */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4498 
/*
 * Unary single-precision op that can raise IEEE exceptions:
 * F(rd) = func(env, F(rs)), with FSR exception bookkeeping around it.
 */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4519 
/*
 * Double-to-single conversion that can raise IEEE exceptions:
 * F(rd) = func(env, D(rs)).
 */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4542 
/*
 * Unary double-precision op with no env access:
 * D(rd) = func(D(rs)).
 */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4564 
/*
 * Unary double-precision op that can raise IEEE exceptions:
 * D(rd) = func(env, D(rs)).
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4586 
/*
 * Single-to-double widening conversion that can raise IEEE exceptions:
 * D(rd) = func(env, F(rs)).
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4609 
/*
 * FMOVq (sparc64): copy a quad-precision register, stored as two
 * consecutive i64 halves in cpu_fpr[].
 */
static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
{
    int rd, rs;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    rd = QFPREG(a->rd);
    rs = QFPREG(a->rs);
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}
4632 
/*
 * Unary quad-precision op with no IEEE exception check.  Operands are
 * passed through the QT0/QT1 staging temporaries: rs is loaded into
 * QT1, the helper writes QT0, which is stored back to rd.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
4653 
/*
 * Unary quad-precision op that can raise IEEE exceptions.
 * Same QT1-in / QT0-out convention as do_qq.
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4674 
/*
 * Quad-to-single conversion that can raise IEEE exceptions:
 * F(rd) = func(env), with the quad source staged in QT1.
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env))
{
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_F(dc);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4698 
/*
 * Quad-to-double conversion that can raise IEEE exceptions:
 * D(rd) = func(env), with the quad source staged in QT1.
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env))
{
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4722 
/*
 * Single-to-quad widening conversion: func(env, F(rs)) produces the
 * quad result in QT0, which is stored back to rd.  No IEEE exception
 * check is needed for widening conversions.
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i32))
{
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_F(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4745 
/*
 * Double-to-quad widening conversion: func(env, D(rs)) produces the
 * quad result in QT0, which is stored back to rd.
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i64))
{
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_D(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4768 
/*
 * Binary single-precision op with no env access:
 * F(rd) = func(F(rs1), F(rs2)).  Used by the VIS partitioned
 * arithmetic and logical insns.
 */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4797 
/*
 * Binary single-precision op that can raise IEEE exceptions:
 * F(rd) = func(env, F(rs1), F(rs2)).
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4820 
/*
 * Binary double-precision op with no env access:
 * D(rd) = func(D(rs1), D(rs2)).  Used by the bulk of the VIS
 * partitioned arithmetic, logical, and data-formatting insns.
 */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4864 
/*
 * VIS compare: gpr(rd) = func(D(rs1), D(rs2)).  The result is an
 * integer mask written to a general-purpose register.
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4892 
/*
 * Binary double-precision op that can raise IEEE exceptions:
 * D(rd) = func(env, D(rs1), D(rs2)).
 */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4916 
/*
 * FsMULd: single x single -> double multiply.  Gated on the
 * CPU_FEATURE_FSMULD feature bit; without it, raise an
 * unimplemented-FPop exception.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
4938 
/*
 * Three-source double-precision op: D(rd) = func(D(rd), D(rs1), D(rs2)).
 * The current rd value (src0) serves as an accumulator input, as
 * required by PDIST.
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4958 
/*
 * Binary quad-precision op that can raise IEEE exceptions.
 * Sources are staged in QT0 (rs1) and QT1 (rs2); the helper writes
 * its result to QT0, which is stored back to rd.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4983 
/*
 * FdMULq: widening multiply -- two double-precision sources producing
 * a quad-precision result, returned via the QT0 env slot.
 */
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* The quad result requires float128 support. */
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_fdmulq(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd))<!-- -->;
    return advance_pc(dc);
}
5004 
/*
 * FMOVR: conditionally move an fp register based on the contents of
 * integer register rs1 (tested against zero per a->cond).  'func'
 * performs the actual move at single/double/quad width.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    /*
     * NOTE(review): a register move raises no IEEE exceptions, so
     * clearing cexc/FTT here looks conservative -- confirm this %fsr
     * side effect is intended for FMOVR.
     */
    gen_op_clear_ieee_excp_and_FTT();
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5026 
/*
 * FMOVcc: conditionally move an fp register based on the integer
 * condition codes (icc/xcc selected by a->cc).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5048 
/*
 * FMOVfcc: conditionally move an fp register based on a floating-point
 * condition-code field (%fccN selected by a->cc).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5070 
5071 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5072 {
5073     TCGv_i32 src1, src2;
5074 
5075     if (avail_32(dc) && a->cc != 0) {
5076         return false;
5077     }
5078     if (gen_trap_ifnofpu(dc)) {
5079         return true;
5080     }
5081 
5082     gen_op_clear_ieee_excp_and_FTT();
5083     src1 = gen_load_fpr_F(dc, a->rs1);
5084     src2 = gen_load_fpr_F(dc, a->rs2);
5085     if (e) {
5086         gen_op_fcmpes(a->cc, src1, src2);
5087     } else {
5088         gen_op_fcmps(a->cc, src1, src2);
5089     }
5090     return advance_pc(dc);
5091 }
5092 
5093 TRANS(FCMPs, ALL, do_fcmps, a, false)
5094 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5095 
/*
 * FCMPd / FCMPEd: compare two double-precision registers and set the
 * %fcc field selected by a->cc ('e' variant = signaling compare).
 */
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    /* On 32-bit cpus only condition-code field 0 is valid. */
    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_op_fcmped(a->cc, src1, src2);
    } else {
        gen_op_fcmpd(a->cc, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5120 
/*
 * FCMPq / FCMPEq: quad-precision compare via the QT0/QT1 env slots
 * ('e' variant = signaling compare).
 */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    /* On 32-bit cpus only condition-code field 0 is valid. */
    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    if (e) {
        gen_op_fcmpeq(a->cc);
    } else {
        gen_op_fcmpq(a->cc);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5146 
/*
 * TranslatorOps hook: initialise the per-TB DisasContext from the
 * CPU state and the TB flags before any instruction is translated.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The delay-slot npc is carried in cs_base alongside the pc. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* Default %asi, packed into the TB flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5176 
/* TranslatorOps hook: no extra work needed at TB start for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5180 
/*
 * TranslatorOps hook: record (pc, npc) for this insn so that
 * restore_state_to_opc can recover them after an exception.
 * Non-aligned npc values are sentinels (low bits set) that encode a
 * dynamic or conditional next-pc; normalise them before recording.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Encode the taken target; the fallthrough is pc + 4. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic forms restore identically. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5202 
/*
 * TranslatorOps hook: fetch and translate one 32-bit instruction.
 * An undecodable opcode raises TT_ILL_INSN.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* A branch moved pc away from the linear next insn: end the TB. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5223 
/*
 * TranslatorOps hook: emit the code that ends the TB.  Chooses between
 * direct chaining (both pc and npc static), an indirect lookup-and-goto,
 * or a plain exit, and then emits the out-of-line delayed-exception
 * stubs accumulated during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * Low bits set in pc/npc mark sentinel values.  DYNAMIC_PC
         * means cpu_pc/cpu_npc already hold a value we cannot look up
         * in the TB cache; DYNAMIC_PC_LOOKUP means they do and a
         * lookup-and-goto is still profitable.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch into npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /*
     * Emit each deferred exception stub: the branch to e->lab was
     * generated earlier; here we set pc (and npc, when static) and
     * raise the trap.  The list nodes are freed as we go.
     */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5305 
/* TranslatorOps hook: dump the guest disassembly of this TB to the log. */
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
5312 
/* Callback table consumed by translator_loop() for the SPARC target. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5321 
/*
 * Translate one guest TB into TCG ops by driving the generic
 * translator loop with the SPARC callback table above.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5329 
/*
 * One-time TCG initialisation: create the global TCG values that map
 * onto CPUSPARCState fields (condition-code pieces, pc/npc, integer
 * and fp register files, etc.).
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* fp registers are modelled as 32 x 64-bit doubles (even names). */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* Simple target_ulong globals: pointer to the TCGv, env offset, name. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 reads as zero and is handled specially, so no global for it. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers (%o/%l/%i) live behind the regwptr pointer. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }

#ifdef TARGET_SPARC64
    cpu_fprs = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSPARCState, fprs), "fprs");
#endif
}
5397 
/*
 * Restore env->pc/npc from the (pc, npc) pair recorded by
 * sparc_tr_insn_start.  npc may be a sentinel: DYNAMIC_PC means
 * env->npc was already written by the generated code; a JUMP_PC tag
 * means npc holds the taken branch target in its aligned bits and
 * env->cond decides between that target and the fallthrough pc + 4.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5421