xref: /openbmc/qemu/target/sparc/translate.c (revision dd7dbfcc)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rett(E)                     qemu_build_not_reached()
41 # define gen_helper_power_down(E)               qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
45 # define gen_helper_done(E)                     qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
55 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
56 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
57 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
58 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
59 # define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
60 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
61 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
62 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
63 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
64 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
65 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
66 # define gen_helper_fabsq                ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fnegq                ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
92 # define FSR_LDXFSR_MASK                        0
93 # define FSR_LDXFSR_OLDMASK                     0
94 # define MAXTL_MASK                             0
95 #endif
96 
97 /* Dynamic PC, must exit to main loop. */
98 #define DYNAMIC_PC         1
99 /* Dynamic PC, one of two values according to jump_pc[T2]. */
100 #define JUMP_PC            2
101 /* Dynamic PC, may lookup next TB. */
102 #define DYNAMIC_PC_LOOKUP  3
103 
104 #define DISAS_EXIT  DISAS_TARGET_0
105 
106 /* global register indexes */
107 static TCGv_ptr cpu_regwptr;
108 static TCGv cpu_fsr, cpu_pc, cpu_npc;
109 static TCGv cpu_regs[32];
110 static TCGv cpu_y;
111 static TCGv cpu_tbr;
112 static TCGv cpu_cond;
113 static TCGv cpu_cc_N;
114 static TCGv cpu_cc_V;
115 static TCGv cpu_icc_Z;
116 static TCGv cpu_icc_C;
117 #ifdef TARGET_SPARC64
118 static TCGv cpu_xcc_Z;
119 static TCGv cpu_xcc_C;
120 static TCGv_i32 cpu_fprs;
121 static TCGv cpu_gsr;
122 #else
123 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
124 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
125 #endif
126 
127 #ifdef TARGET_SPARC64
128 #define cpu_cc_Z  cpu_xcc_Z
129 #define cpu_cc_C  cpu_xcc_C
130 #else
131 #define cpu_cc_Z  cpu_icc_Z
132 #define cpu_cc_C  cpu_icc_C
133 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
134 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
135 #endif
136 
137 /* Floating point registers */
138 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
139 
140 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
141 #ifdef TARGET_SPARC64
142 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
143 # define env64_field_offsetof(X)  env_field_offsetof(X)
144 #else
145 # define env32_field_offsetof(X)  env_field_offsetof(X)
146 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
147 #endif
148 
149 typedef struct DisasDelayException {
150     struct DisasDelayException *next;
151     TCGLabel *lab;
152     TCGv_i32 excp;
153     /* Saved state at parent insn. */
154     target_ulong pc;
155     target_ulong npc;
156 } DisasDelayException;
157 
158 typedef struct DisasContext {
159     DisasContextBase base;
160     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
161     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
162     target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
163     int mem_idx;
164     bool fpu_enabled;
165     bool address_mask_32bit;
166 #ifndef CONFIG_USER_ONLY
167     bool supervisor;
168 #ifdef TARGET_SPARC64
169     bool hypervisor;
170 #endif
171 #endif
172 
173     sparc_def_t *def;
174 #ifdef TARGET_SPARC64
175     int fprs_dirty;
176     int asi;
177 #endif
178     DisasDelayException *delay_excp_list;
179 } DisasContext;
180 
181 typedef struct {
182     TCGCond cond;
183     TCGv c1, c2;
184 } DisasCompare;
185 
186 // This function uses non-native bit order
187 #define GET_FIELD(X, FROM, TO)                                  \
188     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
189 
190 // This function uses the order in the manuals, i.e. bit 0 is 2^0
191 #define GET_FIELD_SP(X, FROM, TO)               \
192     GET_FIELD(X, 31 - (TO), 31 - (FROM))
193 
194 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
195 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
196 
197 #ifdef TARGET_SPARC64
198 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
199 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
200 #else
201 #define DFPREG(r) (r & 0x1e)
202 #define QFPREG(r) (r & 0x1c)
203 #endif
204 
205 #define UA2005_HTRAP_MASK 0xff
206 #define V8_TRAP_MASK 0x7f
207 
208 #define IS_IMM (insn & (1<<13))
209 
/* Mark the FP register half containing RD as dirty in FPRS (sparc64 only).
   rd < 32 maps to the lower-bank bit (1), otherwise the upper-bank bit (2). */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
222 
223 /* floating point registers moves */
/* Load single-precision register %fSRC into a fresh i32 temp.
   Two 32-bit registers share one cpu_fpr[] i64 slot: the odd-numbered
   register lives in the low half, the even-numbered one in the high half. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
234 
/* Store i32 V into single-precision register %fDST, depositing it into
   the correct half of the shared i64 slot (odd reg -> bits [31:0],
   even reg -> bits [63:32]), and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}
244 
/* Return a fresh i32 temp to receive a single-precision result;
   the caller commits it with gen_store_fpr_F. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
249 
/* Return the i64 global backing double-precision register %dSRC.
   DFPREG folds in the sparc64 extended-bank bit of the register number. */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
255 
/* Store i64 V into double-precision register %dDST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
262 
/* Return the i64 global for %dDST to be written directly as a destination.
   NOTE: unlike gen_store_fpr_D, this does NOT mark FPRS dirty; the caller
   is responsible for calling gen_update_fprs_dirty afterwards. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
267 
/* Copy quad register pair %qSRC (two i64 halves) into env->qt0
   for use by quad-precision helper functions. */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
275 
/* Copy quad register pair %qSRC into env->qt1 (second helper operand). */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
283 
/* Copy the quad-precision helper result from env->qt0 back into
   quad register pair %qDST. */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
291 
292 /* moves */
293 #ifdef CONFIG_USER_ONLY
294 #define supervisor(dc) 0
295 #define hypervisor(dc) 0
296 #else
297 #ifdef TARGET_SPARC64
298 #define hypervisor(dc) (dc->hypervisor)
299 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
300 #else
301 #define supervisor(dc) (dc->supervisor)
302 #define hypervisor(dc) 0
303 #endif
304 #endif
305 
306 #if !defined(TARGET_SPARC64)
307 # define AM_CHECK(dc)  false
308 #elif defined(TARGET_ABI32)
309 # define AM_CHECK(dc)  true
310 #elif defined(CONFIG_USER_ONLY)
311 # define AM_CHECK(dc)  false
312 #else
313 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
314 #endif
315 
/* Truncate ADDR in place to 32 bits when the 32-bit address mask
   applies (see AM_CHECK above). */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
322 
/* Compile-time counterpart of gen_address_mask for immediate addresses. */
static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}
327 
328 static TCGv gen_load_gpr(DisasContext *dc, int reg)
329 {
330     if (reg > 0) {
331         assert(reg < 32);
332         return cpu_regs[reg];
333     } else {
334         TCGv t = tcg_temp_new();
335         tcg_gen_movi_tl(t, 0);
336         return t;
337     }
338 }
339 
340 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
341 {
342     if (reg > 0) {
343         assert(reg < 32);
344         tcg_gen_mov_tl(cpu_regs[reg], v);
345     }
346 }
347 
348 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
349 {
350     if (reg > 0) {
351         assert(reg < 32);
352         return cpu_regs[reg];
353     } else {
354         return tcg_temp_new();
355     }
356 }
357 
358 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
359 {
360     return translator_use_goto_tb(&s->base, pc) &&
361            translator_use_goto_tb(&s->base, npc);
362 }
363 
/* End the TB, transferring control to (PC, NPC): chained via goto_tb
   when allowed, otherwise via a TB-lookup indirect jump. */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
380 
/* Return the 32-bit carry (icc.C) as a 0/1 value.  On a 64-bit target
   the carry out of bit 31 is stored in bit 32 of cpu_icc_C and must be
   extracted; on a 32-bit target cpu_icc_C already holds it directly. */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
390 
/*
 * DST = SRC1 + SRC2 (+ CIN), updating the live condition-code
 * variables N, Z, V, C (and icc.Z/icc.C on 64-bit targets).
 * CIN may be NULL for a plain add.  Note the exact order of writes:
 * cpu_cc_Z temporarily holds src1^src2 while V is being computed.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* V = (result ^ src1) & ~(src1 ^ src2), computed via Z as scratch. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
415 
/* ADDcc: add without carry-in, setting condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
420 
/* TADDcc: tagged add.  Any set tag bit (bits 0-1 of either operand)
   forces icc.V, in addition to the normal overflow computation. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
436 
/* ADDC: add with the 32-bit carry, without setting condition codes. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}
442 
/* ADDCcc: add with the 32-bit carry, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
447 
/*
 * DST = SRC1 - SRC2 (- CIN), updating the condition-code variables.
 * sub2 yields a borrow of -1, so C is negated to the 0/1 convention.
 * As in gen_op_addcc_int, cpu_cc_Z temporarily holds src1^src2.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* V = (result ^ src1) & (src1 ^ src2). */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Carry-in to bit 32 is result ^ src1 ^ src2; src xor is still in Z. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
469 
/* SUBcc: subtract without borrow-in, setting condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
474 
/* TSUBcc: tagged subtract; tag bits force icc.V as in gen_op_taddcc. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
490 
/* SUBC: subtract with the 32-bit carry, without setting condition codes. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}
496 
/* SUBCcc: subtract with the 32-bit carry, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
501 
/*
 * MULScc: one step of the iterative 32-bit multiply.
 * Shifts %y right by one (inserting src1's low bit), conditionally
 * zeroes the addend based on the old %y bit 0, shifts src1 right by
 * one inserting N^V, and performs an ADDcc on the results.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_andi_tl(t0, cpu_y, 0x1);
    tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
538 
/*
 * 32x32 -> 64-bit multiply: low 32 bits (64 bits on sparc64) of the
 * product go to DST, high 32 bits to %y.  SIGN_EXT selects signed
 * versus unsigned extension of the truncated operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
563 
/* UMUL: unsigned 32-bit multiply. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
569 
/* SMUL: signed 32-bit multiply. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
575 
/* UDIVX: 64-bit unsigned divide (helper raises division-by-zero traps). */
static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}
580 
/* SDIVX: 64-bit signed divide (helper raises division-by-zero traps). */
static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}
585 
/* UDIV: 32-bit unsigned divide.  The helper returns a 64-bit value
   (quotient plus overflow information in the high part); only the
   low 32 bits are kept here. */
static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_udiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32u_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_udiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
597 
/* SDIV: 32-bit signed divide; analogous to gen_op_udiv. */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
609 
/*
 * UDIVcc: unsigned divide setting condition codes.  The helper's
 * 64-bit result carries the quotient in the low half and the overflow
 * flag in the high half; they are unpacked into N (also Z and dst)
 * and V respectively, with C cleared.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;        /* on sparc64, TCGv is 64-bit: use V as scratch */
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
634 
/* SDIVcc: signed divide setting condition codes; see gen_op_udivcc. */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;        /* on sparc64, TCGv is 64-bit: use V as scratch */
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
659 
/* TADDccTV: tagged add, trapping on overflow (handled in the helper). */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
664 
/* TSUBccTV: tagged subtract, trapping on overflow (handled in the helper). */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
669 
/* POPC: population count of SRC2.  SRC1 is unused; the parameter exists
   only so the signature matches the common two-operand emitter shape. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
674 
#ifndef TARGET_SPARC64
/* 32-bit build stub: the real array8 helper exists only on sparc64,
   so this must never be reached at runtime. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
681 
/* ARRAY16: array8 address computation scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
687 
/* ARRAY32: array8 address computation scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
693 
/* FPACK16 (VIS): pack via helper using GSR scale; sparc64 only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
702 
/* FPACKFIX (VIS): pack via helper using GSR scale; sparc64 only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
711 
/* FPACK32 (VIS): pack via helper using GSR scale; sparc64 only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
720 
/*
 * FALIGNDATA (VIS): concatenate S1:S2 and extract 8 bytes starting at
 * the byte offset held in GSR.align (low 3 bits of GSR); sparc64 only.
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Bit shift = GSR.align * 8. */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
747 
/* BSHUFFLE (VIS): byte shuffle via helper, selector in GSR; sparc64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
756 
// Branch always: condition evaluates to 1.
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
762 
// Branch never: condition evaluates to 0.
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
768 
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
/* Extract FCC0 of the condition field at FCC_OFFSET in SRC (the FSR)
   into REG as a 0/1 value. */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
782 
/* Extract FCC1 of the condition field at FCC_OFFSET in SRC into REG. */
static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
788 
789 // !0: FCC0 | FCC1
790 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
791 {
792     TCGv t0 = tcg_temp_new();
793     gen_mov_reg_FCC0(dst, src, fcc_offset);
794     gen_mov_reg_FCC1(t0, src, fcc_offset);
795     tcg_gen_or_tl(dst, dst, t0);
796 }
797 
798 // 1 or 2: FCC0 ^ FCC1
799 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
800 {
801     TCGv t0 = tcg_temp_new();
802     gen_mov_reg_FCC0(dst, src, fcc_offset);
803     gen_mov_reg_FCC1(t0, src, fcc_offset);
804     tcg_gen_xor_tl(dst, dst, t0);
805 }
806 
// FBUL, fcc == 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
812 
// FBL, fcc == 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}
821 
// FBUG, fcc == 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
827 
// FBG, fcc == 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}
836 
// FBU, fcc == 3 (unordered): FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}
845 
// FBE, fcc == 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
855 
// FBUE, fcc == 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
865 
// FBGE, fcc == 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
872 
// FBUGE, fcc != 1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
882 
// FBLE, fcc == 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
889 
// FBULE, fcc != 2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
899 
// FBO, fcc != 3 (ordered): !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
909 
/* End the TB with a two-way conditional exit: PC1 if R_COND is nonzero,
   PC2 otherwise, each followed by its sequential npc (+4). */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
922 
/* Materialize the pending JUMP_PC state: select between the two
   recorded jump targets based on cpu_cond and write cpu_npc. */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}
931 
/* Call this function before reusing cpu_cond: if npc is in the JUMP_PC
   state, the condition register still encodes a pending branch, so
   resolve it into cpu_npc first. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
941 
/* Make cpu_npc valid.  The low two bits of dc->npc distinguish the
   symbolic states (DYNAMIC_PC/JUMP_PC/DYNAMIC_PC_LOOKUP) from a real
   (4-byte aligned) target address. */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the runtime value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
960 
/* Synchronize cpu_pc and cpu_npc with the translation-time state,
   e.g. before raising an exception. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
966 
/* Raise exception WHICH at the current insn and terminate the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
973 
974 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
975 {
976     DisasDelayException *e = g_new0(DisasDelayException, 1);
977 
978     e->next = dc->delay_excp_list;
979     dc->delay_excp_list = e;
980 
981     e->lab = gen_new_label();
982     e->excp = excp;
983     e->pc = dc->pc;
984     /* Caller must have used flush_cond before branch. */
985     assert(e->npc != JUMP_PC);
986     e->npc = dc->npc;
987 
988     return e->lab;
989 }
990 
/* Convenience wrapper: delayed exception with a constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
995 
/* Emit a runtime check that ADDR has none of the MASK bits set,
   branching to a delayed TT_UNALIGNED exception if it does. */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
1007 
1008 static void gen_mov_pc_npc(DisasContext *dc)
1009 {
1010     if (dc->npc & 3) {
1011         switch (dc->npc) {
1012         case JUMP_PC:
1013             gen_generic_branch(dc);
1014             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1015             dc->pc = DYNAMIC_PC_LOOKUP;
1016             break;
1017         case DYNAMIC_PC:
1018         case DYNAMIC_PC_LOOKUP:
1019             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1020             dc->pc = dc->npc;
1021             break;
1022         default:
1023             g_assert_not_reached();
1024         }
1025     } else {
1026         dc->pc = dc->npc;
1027     }
1028 }
1029 
/* Emit code to step the runtime pc/npc to the next insn: pc = npc,
   npc += 4. */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1035 
/*
 * Fill in CMP for integer condition COND (4-bit icc/xcc condition
 * field) using the lazily maintained cc_{N,Z,V,C} state.  XCC selects
 * the 64-bit condition codes; otherwise the 32-bit (icc) views are
 * used.  The result is expressed as "c1 <cond> c2" with c2 == 0.
 * Bit 3 of COND inverts the sense of the low three bits.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = cmp->c2;
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        /* Replicate the sign of N^V across the word, then mask Z. */
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* The 32-bit carry lives in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1134 
/*
 * Fill in CMP for floating-point condition COND on fcc field CC
 * (fcc0..fcc3).  OFFSET selects the fcc field's bit position within
 * cpu_fsr for the gen_op_eval_* helpers; the result is reduced to a
 * boolean, so the final comparison is "r_dst != 0".
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1212 
/*
 * Register-based conditions (BPr/MOVr/FMOVr), indexed by the 3-bit
 * rcond field.  The table stores the INVERTED condition (branch not
 * taken); gen_compare_reg inverts it back to the taken sense.
 */
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

/* Fill in CMP to test register R_SRC against zero with rcond COND. */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}
1231 
/* Clear the FTT and current-exception (CEXC) fields of cpu_fsr by
   masking with FSR_FTT_CEXC_NMASK. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1236 
/*
 * Single/double FP move, negate, and absolute value.  Each first
 * clears FSR.FTT/CEXC via gen_op_clear_ieee_excp_and_FTT(); the
 * operations themselves are pure bit manipulations.
 */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}
1272 
/*
 * FP compare dispatchers.  On sparc64 there are four fcc fields, each
 * with its own helper; pre-v9 has only fcc0, so FCCNO is ignored in
 * the #else versions.  All helpers update cpu_fsr.  The fcmpe*
 * variants are the "compare and cause exception if unordered" forms
 * (behavior implemented in the helpers).
 */
#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Quad operands are implicit (QT0/QT1 in env), hence no TCGv args. */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif
1414 
/* Raise a TT_FP_EXCP trap with the FSR trap-type field set to
   FSR_FLAGS (old FTT bits are cleared first). */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1421 
/* If the FPU is disabled, raise TT_NFPU_INSN and return nonzero so the
   caller can abandon translation of the insn.  In user-only builds the
   FPU is always enabled, so this is a no-op there. */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1432 
/* asi moves */
typedef enum {
    GET_ASI_HELPER,  /* no inline implementation; call the ld/st_asi helper */
    GET_ASI_EXCP,    /* an exception was raised while resolving the ASI */
    GET_ASI_DIRECT,  /* plain tcg_gen_qemu_ld/st with chosen mem_idx */
    GET_ASI_DTWINX,  /* twin/quad doubleword forms (v9) */
    GET_ASI_BLOCK,   /* 64-byte FP block transfer (v9) */
    GET_ASI_SHORT,   /* 8/16-bit FP load/store (v9) */
    GET_ASI_BCOPY,   /* sparc32 block copy (sta) */
    GET_ASI_BFILL,   /* sparc32 block fill (stda) */
} ASIType;

/* Result of resolving an ASI for one memory access. */
typedef struct {
    ASIType type;
    int asi;       /* resolved ASI number */
    int mem_idx;   /* MMU index to use for the access */
    MemOp memop;   /* size/sign/endianness, possibly overridden by the ASI */
} DisasASI;
1451 
1452 /*
1453  * Build DisasASI.
1454  * For asi == -1, treat as non-asi.
1455  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1456  */
1457 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1458 {
1459     ASIType type = GET_ASI_HELPER;
1460     int mem_idx = dc->mem_idx;
1461 
1462     if (asi == -1) {
1463         /* Artificial "non-asi" case. */
1464         type = GET_ASI_DIRECT;
1465         goto done;
1466     }
1467 
1468 #ifndef TARGET_SPARC64
1469     /* Before v9, all asis are immediate and privileged.  */
1470     if (asi < 0) {
1471         gen_exception(dc, TT_ILL_INSN);
1472         type = GET_ASI_EXCP;
1473     } else if (supervisor(dc)
1474                /* Note that LEON accepts ASI_USERDATA in user mode, for
1475                   use with CASA.  Also note that previous versions of
1476                   QEMU allowed (and old versions of gcc emitted) ASI_P
1477                   for LEON, which is incorrect.  */
1478                || (asi == ASI_USERDATA
1479                    && (dc->def->features & CPU_FEATURE_CASA))) {
1480         switch (asi) {
1481         case ASI_USERDATA:   /* User data access */
1482             mem_idx = MMU_USER_IDX;
1483             type = GET_ASI_DIRECT;
1484             break;
1485         case ASI_KERNELDATA: /* Supervisor data access */
1486             mem_idx = MMU_KERNEL_IDX;
1487             type = GET_ASI_DIRECT;
1488             break;
1489         case ASI_M_BYPASS:    /* MMU passthrough */
1490         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1491             mem_idx = MMU_PHYS_IDX;
1492             type = GET_ASI_DIRECT;
1493             break;
1494         case ASI_M_BCOPY: /* Block copy, sta access */
1495             mem_idx = MMU_KERNEL_IDX;
1496             type = GET_ASI_BCOPY;
1497             break;
1498         case ASI_M_BFILL: /* Block fill, stda access */
1499             mem_idx = MMU_KERNEL_IDX;
1500             type = GET_ASI_BFILL;
1501             break;
1502         }
1503 
1504         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1505          * permissions check in get_physical_address(..).
1506          */
1507         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1508     } else {
1509         gen_exception(dc, TT_PRIV_INSN);
1510         type = GET_ASI_EXCP;
1511     }
1512 #else
1513     if (asi < 0) {
1514         asi = dc->asi;
1515     }
1516     /* With v9, all asis below 0x80 are privileged.  */
1517     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1518        down that bit into DisasContext.  For the moment that's ok,
1519        since the direct implementations below doesn't have any ASIs
1520        in the restricted [0x30, 0x7f] range, and the check will be
1521        done properly in the helper.  */
1522     if (!supervisor(dc) && asi < 0x80) {
1523         gen_exception(dc, TT_PRIV_ACT);
1524         type = GET_ASI_EXCP;
1525     } else {
1526         switch (asi) {
1527         case ASI_REAL:      /* Bypass */
1528         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1529         case ASI_REAL_L:    /* Bypass LE */
1530         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1531         case ASI_TWINX_REAL:   /* Real address, twinx */
1532         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1533         case ASI_QUAD_LDD_PHYS:
1534         case ASI_QUAD_LDD_PHYS_L:
1535             mem_idx = MMU_PHYS_IDX;
1536             break;
1537         case ASI_N:  /* Nucleus */
1538         case ASI_NL: /* Nucleus LE */
1539         case ASI_TWINX_N:
1540         case ASI_TWINX_NL:
1541         case ASI_NUCLEUS_QUAD_LDD:
1542         case ASI_NUCLEUS_QUAD_LDD_L:
1543             if (hypervisor(dc)) {
1544                 mem_idx = MMU_PHYS_IDX;
1545             } else {
1546                 mem_idx = MMU_NUCLEUS_IDX;
1547             }
1548             break;
1549         case ASI_AIUP:  /* As if user primary */
1550         case ASI_AIUPL: /* As if user primary LE */
1551         case ASI_TWINX_AIUP:
1552         case ASI_TWINX_AIUP_L:
1553         case ASI_BLK_AIUP_4V:
1554         case ASI_BLK_AIUP_L_4V:
1555         case ASI_BLK_AIUP:
1556         case ASI_BLK_AIUPL:
1557             mem_idx = MMU_USER_IDX;
1558             break;
1559         case ASI_AIUS:  /* As if user secondary */
1560         case ASI_AIUSL: /* As if user secondary LE */
1561         case ASI_TWINX_AIUS:
1562         case ASI_TWINX_AIUS_L:
1563         case ASI_BLK_AIUS_4V:
1564         case ASI_BLK_AIUS_L_4V:
1565         case ASI_BLK_AIUS:
1566         case ASI_BLK_AIUSL:
1567             mem_idx = MMU_USER_SECONDARY_IDX;
1568             break;
1569         case ASI_S:  /* Secondary */
1570         case ASI_SL: /* Secondary LE */
1571         case ASI_TWINX_S:
1572         case ASI_TWINX_SL:
1573         case ASI_BLK_COMMIT_S:
1574         case ASI_BLK_S:
1575         case ASI_BLK_SL:
1576         case ASI_FL8_S:
1577         case ASI_FL8_SL:
1578         case ASI_FL16_S:
1579         case ASI_FL16_SL:
1580             if (mem_idx == MMU_USER_IDX) {
1581                 mem_idx = MMU_USER_SECONDARY_IDX;
1582             } else if (mem_idx == MMU_KERNEL_IDX) {
1583                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1584             }
1585             break;
1586         case ASI_P:  /* Primary */
1587         case ASI_PL: /* Primary LE */
1588         case ASI_TWINX_P:
1589         case ASI_TWINX_PL:
1590         case ASI_BLK_COMMIT_P:
1591         case ASI_BLK_P:
1592         case ASI_BLK_PL:
1593         case ASI_FL8_P:
1594         case ASI_FL8_PL:
1595         case ASI_FL16_P:
1596         case ASI_FL16_PL:
1597             break;
1598         }
1599         switch (asi) {
1600         case ASI_REAL:
1601         case ASI_REAL_IO:
1602         case ASI_REAL_L:
1603         case ASI_REAL_IO_L:
1604         case ASI_N:
1605         case ASI_NL:
1606         case ASI_AIUP:
1607         case ASI_AIUPL:
1608         case ASI_AIUS:
1609         case ASI_AIUSL:
1610         case ASI_S:
1611         case ASI_SL:
1612         case ASI_P:
1613         case ASI_PL:
1614             type = GET_ASI_DIRECT;
1615             break;
1616         case ASI_TWINX_REAL:
1617         case ASI_TWINX_REAL_L:
1618         case ASI_TWINX_N:
1619         case ASI_TWINX_NL:
1620         case ASI_TWINX_AIUP:
1621         case ASI_TWINX_AIUP_L:
1622         case ASI_TWINX_AIUS:
1623         case ASI_TWINX_AIUS_L:
1624         case ASI_TWINX_P:
1625         case ASI_TWINX_PL:
1626         case ASI_TWINX_S:
1627         case ASI_TWINX_SL:
1628         case ASI_QUAD_LDD_PHYS:
1629         case ASI_QUAD_LDD_PHYS_L:
1630         case ASI_NUCLEUS_QUAD_LDD:
1631         case ASI_NUCLEUS_QUAD_LDD_L:
1632             type = GET_ASI_DTWINX;
1633             break;
1634         case ASI_BLK_COMMIT_P:
1635         case ASI_BLK_COMMIT_S:
1636         case ASI_BLK_AIUP_4V:
1637         case ASI_BLK_AIUP_L_4V:
1638         case ASI_BLK_AIUP:
1639         case ASI_BLK_AIUPL:
1640         case ASI_BLK_AIUS_4V:
1641         case ASI_BLK_AIUS_L_4V:
1642         case ASI_BLK_AIUS:
1643         case ASI_BLK_AIUSL:
1644         case ASI_BLK_S:
1645         case ASI_BLK_SL:
1646         case ASI_BLK_P:
1647         case ASI_BLK_PL:
1648             type = GET_ASI_BLOCK;
1649             break;
1650         case ASI_FL8_S:
1651         case ASI_FL8_SL:
1652         case ASI_FL8_P:
1653         case ASI_FL8_PL:
1654             memop = MO_UB;
1655             type = GET_ASI_SHORT;
1656             break;
1657         case ASI_FL16_S:
1658         case ASI_FL16_SL:
1659         case ASI_FL16_P:
1660         case ASI_FL16_PL:
1661             memop = MO_TEUW;
1662             type = GET_ASI_SHORT;
1663             break;
1664         }
1665         /* The little-endian asis all have bit 3 set.  */
1666         if (asi & 8) {
1667             memop ^= MO_BSWAP;
1668         }
1669     }
1670 #endif
1671 
1672  done:
1673     return (DisasASI){ type, asi, mem_idx, memop };
1674 }
1675 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs for sparc32 user-only builds: every non-direct ASI access is
 * rejected before reaching the helpers, so these must never be called.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1689 
/* Emit an integer load through ASI descriptor DA into DST from ADDR. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised while resolving the ASI. */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may fault; make pc/npc visible. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; narrow for sparc32. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1720 
/* Emit an integer store of SRC through ASI descriptor DA to ADDR. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised while resolving the ASI. */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may fault; make pc/npc visible. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes 64 bits; widen for sparc32. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1792 
1793 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1794                          TCGv dst, TCGv src, TCGv addr)
1795 {
1796     switch (da->type) {
1797     case GET_ASI_EXCP:
1798         break;
1799     case GET_ASI_DIRECT:
1800         tcg_gen_atomic_xchg_tl(dst, addr, src,
1801                                da->mem_idx, da->memop | MO_ALIGN);
1802         break;
1803     default:
1804         /* ??? Should be DAE_invalid_asi.  */
1805         gen_exception(dc, TT_DATA_ACCESS);
1806         break;
1807     }
1808 }
1809 
1810 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1811                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1812 {
1813     switch (da->type) {
1814     case GET_ASI_EXCP:
1815         return;
1816     case GET_ASI_DIRECT:
1817         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1818                                   da->mem_idx, da->memop | MO_ALIGN);
1819         break;
1820     default:
1821         /* ??? Should be DAE_invalid_asi.  */
1822         gen_exception(dc, TT_DATA_ACCESS);
1823         break;
1824     }
1825 }
1826 
/* LDSTUB through an ASI: atomically load the byte at ADDR into DST and
   store 0xff there. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised while resolving the ASI. */
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot do the load+store atomically via helpers; retry
               the TB in the exclusive (serial) execution mode. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1861 
/*
 * FP load through an ASI (ldfa/lddfa/ldqfa).  ORIG_SIZE is the insn's
 * access size; 128-bit accesses are split into two 64-bit operations
 * for now.  RD is the FP register number as encoded in the insn.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load into a temp first so a fault on the second half does
               not leave the register pair partially updated. */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
1973 
/*
 * Generate code for an FP store through an alternate ASI
 * (stfa/stdfa/stqfa).  orig_size is the operand size encoded in the
 * instruction; da describes the already-resolved ASI; rd is the FP
 * register number as encoded.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception has already been generated; emit nothing. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2056 
/*
 * Generate code for LDDA with an alternate ASI: a 64-bit (or, for
 * DTWINX, 128-bit) load whose result is written back to the register
 * pair rd (high half) / rd+1 (low half).
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated; skip the writeback below. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2132 
/*
 * Generate code for STDA with an alternate ASI: store the register
 * pair rd (high half) / rd+1 (low half) as a single 64-bit (or, for
 * DTWINX, 128-bit) access.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated; emit nothing. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_concat_tl_i64(t64, lo, hi);
            /* Round the address down to an 8-byte boundary. */
            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2224 
/* Conditional move of a single-precision FP register (FMOVS<cond>). */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    /* dst = (condition held) ? fpr[rs] : fpr[rd] (i.e. unchanged). */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2250 
/* Conditional move of a double-precision FP register (FMOVD<cond>). */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    /* dst = cond(c1, c2) ? fpr[rs] : fpr[rd] (i.e. unchanged). */
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2263 
/* Conditional move of a quad FP register (FMOVQ<cond>), performed as
   two independent 64-bit movconds on the register halves. */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    /* The fp regs were written directly, bypassing gen_store_fpr_*,
       so mark the destination dirty explicitly. */
    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}
2280 
#ifdef TARGET_SPARC64
/*
 * Emit code computing a pointer to the trap state for the current
 * trap level: r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2304 
2305 static int extract_dfpreg(DisasContext *dc, int x)
2306 {
2307     return DFPREG(x);
2308 }
2309 
2310 static int extract_qfpreg(DisasContext *dc, int x)
2311 {
2312     return QFPREG(x);
2313 }
2314 
2315 /* Include the auto-generated decoder.  */
2316 #include "decode-insns.c.inc"
2317 
/*
 * Define trans_NAME as an availability gate (avail_AVAIL) around the
 * shared implementation FUNC; used with the generated decoder above.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/* Availability predicates: fixed per target where possible, otherwise
   tested against the cpu feature bits at translation time. */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2348 
/* Default case for non-jump instructions: step pc/npc to the next insn. */
static bool advance_pc(DisasContext *dc)
{
    /*
     * A low-bit-set npc is one of the magic DYNAMIC_PC / JUMP_PC
     * markers rather than a real (4-byte aligned) address.
     */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime: emit code to advance it. */
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2373 
2374 /*
2375  * Major opcodes 00 and 01 -- branches, call, and sethi
2376  */
2377 
2378 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2379 {
2380     if (annul) {
2381         dc->pc = dc->npc + 4;
2382         dc->npc = dc->pc + 4;
2383     } else {
2384         dc->pc = dc->npc;
2385         dc->npc = dc->pc + 4;
2386     }
2387     return true;
2388 }
2389 
2390 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2391                                        target_ulong dest)
2392 {
2393     if (annul) {
2394         dc->pc = dest;
2395         dc->npc = dest + 4;
2396     } else {
2397         dc->pc = dc->npc;
2398         dc->npc = dest;
2399         tcg_gen_mov_tl(cpu_pc, cpu_npc);
2400     }
2401     return true;
2402 }
2403 
/*
 * Conditional branch with a pre-computed comparison in *cmp.
 * With annul, the delay slot runs only on the taken path, so the TB
 * ends here with a two-way goto_tb.  Without annul, the branch outcome
 * is recorded (statically via jump_pc/JUMP_PC, or dynamically in
 * cpu_npc) and translation continues into the delay slot.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        /* Branch to l1 when the condition does NOT hold. */
        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        /* Taken: execute the delay slot at npc, then continue at dest. */
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        /* Not taken: the delay slot is annulled (skipped). */
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* npc is runtime-only: select the target with movcond. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, cmp->c2,
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            dc->pc = npc;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;
            dc->npc = JUMP_PC;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xor_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
        }
    }
    return true;
}
2449 
/* Raise a privileged-instruction trap; the insn is fully handled. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise an fp exception with FTT = "unimplemented FPop". */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2461 
2462 static bool gen_trap_float128(DisasContext *dc)
2463 {
2464     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2465         return false;
2466     }
2467     return raise_unimpfpop(dc);
2468 }
2469 
2470 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2471 {
2472     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2473     DisasCompare cmp;
2474 
2475     switch (a->cond) {
2476     case 0x0:
2477         return advance_jump_uncond_never(dc, a->a);
2478     case 0x8:
2479         return advance_jump_uncond_always(dc, a->a, target);
2480     default:
2481         flush_cond(dc);
2482 
2483         gen_compare(&cmp, a->cc, a->cond, dc);
2484         return advance_jump_cond(dc, &cmp, a->a, target);
2485     }
2486 }
2487 
2488 TRANS(Bicc, ALL, do_bpcc, a)
2489 TRANS(BPcc,  64, do_bpcc, a)
2490 
2491 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2492 {
2493     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2494     DisasCompare cmp;
2495 
2496     if (gen_trap_ifnofpu(dc)) {
2497         return true;
2498     }
2499     switch (a->cond) {
2500     case 0x0:
2501         return advance_jump_uncond_never(dc, a->a);
2502     case 0x8:
2503         return advance_jump_uncond_always(dc, a->a, target);
2504     default:
2505         flush_cond(dc);
2506 
2507         gen_fcompare(&cmp, a->cc, a->cond);
2508         return advance_jump_cond(dc, &cmp, a->a, target);
2509     }
2510 }
2511 
2512 TRANS(FBPfcc,  64, do_fbpfcc, a)
2513 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2514 
2515 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2516 {
2517     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2518     DisasCompare cmp;
2519 
2520     if (!avail_64(dc)) {
2521         return false;
2522     }
2523     if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2524         return false;
2525     }
2526 
2527     flush_cond(dc);
2528     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2529     return advance_jump_cond(dc, &cmp, a->a, target);
2530 }
2531 
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    /* CALL writes its own address into %o7 (r15). */
    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    /* The delay slot at the old npc executes before the jump. */
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2541 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;   /* rejected here -> decoded as illegal instruction */
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2555 
2556 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2557 {
2558     /* Special-case %g0 because that's the canonical nop.  */
2559     if (a->rd) {
2560         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2561     }
2562     return advance_pc(dc);
2563 }
2564 
2565 /*
2566  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2567  */
2568 
/*
 * Shared implementation of the Tcc (trap on condition) forms.
 * The trap number is ((rs1 + rs2_or_imm) & mask) + TT_TRAP, where the
 * mask width depends on hypervisor support.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* Compute the trap number at runtime from %rs1 (+ rs2/imm). */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Branch to a delayed-exception stub when the condition holds. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2619 
2620 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2621 {
2622     if (avail_32(dc) && a->cc) {
2623         return false;
2624     }
2625     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2626 }
2627 
2628 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2629 {
2630     if (avail_64(dc)) {
2631         return false;
2632     }
2633     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2634 }
2635 
2636 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2637 {
2638     if (avail_32(dc)) {
2639         return false;
2640     }
2641     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2642 }
2643 
/* STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2649 
/* MEMBAR (sparc64 only): emit the requested memory barrier. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2665 
2666 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2667                           TCGv (*func)(DisasContext *, TCGv))
2668 {
2669     if (!priv) {
2670         return raise_priv(dc);
2671     }
2672     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2673     return advance_pc(dc);
2674 }
2675 
/* RDY: %y lives in a TCG global, so return it directly (dst unused). */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}
2680 
2681 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2682 {
2683     /*
2684      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2685      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2686      * This matches after all other ASR, so Leon3 Asr17 is handled first.
2687      */
2688     if (avail_64(dc) && a->rs1 != 0) {
2689         return false;
2690     }
2691     return do_rd_special(dc, true, a->rd, do_rdy);
2692 }
2693 
2694 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2695 {
2696     uint32_t val;
2697 
2698     /*
2699      * TODO: There are many more fields to be filled,
2700      * some of which are writable.
2701      */
2702     val = dc->def->nwindows - 1;   /* [4:0] NWIN */
2703     val |= 1 << 8;                 /* [8]   V8   */
2704 
2705     return tcg_constant_tl(val);
2706 }
2707 
2708 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2709 
/* Read %ccr (condition codes) via helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* Read %asi; it is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2728 
/* Read the %tick counter via helper; an I/O access for icount. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* Read %pc; a constant at translation time. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2751 
/* Read %fprs (fp register state). */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* Read %gsr; traps first if the FPU is disabled (dst unused). */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* Read %softint. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2775 
/* Read %tick_cmpr. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* Read the %stick (system tick) counter; an I/O access for icount. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* Read %stick_cmpr. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2809 
2810 /*
2811  * UltraSPARC-T1 Strand status.
2812  * HYPV check maybe not enough, UA2005 & UA2007 describe
2813  * this ASR as impl. dep
2814  */
2815 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2816 {
2817     return tcg_constant_tl(1);
2818 }
2819 
2820 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2821 
2822 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2823 {
2824     gen_helper_rdpsr(dst, tcg_env);
2825     return dst;
2826 }
2827 
2828 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2829 
/* Read hyperprivileged %hpstate. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* Read %htstate[tl]: index the per-trap-level array with tl * 8. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2854 
/* Read hyperprivileged %hintp. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* Read hyperprivileged %htba (hypervisor trap base address). */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* Read hyperprivileged %hver. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* Read hyperprivileged %hstick_cmpr. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2887 
/* RDWIM (sparc32 only): read the window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2895 
/* Read %tpc of the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* Read %tnpc of the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* Read %tstate of the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* Read %tt (trap type) of the current trap level. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2956 
2957 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2958 {
2959     return cpu_tbr;
2960 }
2961 
2962 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2963 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2964 
/* RDPR %pstate: read the processor state register (32-bit env field). */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* RDPR %tl: read the current trap level. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* RDPR %pil: read the processor interrupt level. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* RDPR %cwp: read the current window pointer via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* RDPR %cansave: windows available for SAVE without spill. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* RDPR %canrestore: windows available for RESTORE without fill. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* RDPR %cleanwin: number of clean register windows. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* RDPR %otherwin: windows belonging to the "other" address space. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* RDPR %wstate: read the window state register. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* RDPR %gl: read the global register level (UA2005 GL feature). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* RDPR %ver: read the (constant) version register from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3062 
3063 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3064 {
3065     if (avail_64(dc)) {
3066         gen_helper_flushw(tcg_env);
3067         return advance_pc(dc);
3068     }
3069     return false;
3070 }
3071 
/*
 * Common expander for the privileged/special "write state register"
 * instructions.  The architectural source value is rs1 ^ rs2 (or
 * rs1 ^ simm13); with rs1 == %g0 this degenerates to a plain move of
 * the rs2/immediate operand.  The under-decoded rs2 form is rejected
 * first (illegal instruction), then the privilege check is applied.
 * FUNC receives the computed value and emits the actual write.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* rs1 is %g0: the xor is an identity, use the constant directly. */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: pass rs1 through unchanged. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3103 
/* WRY: write the Y register; only the low 32 bits are architected. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* WRCCR: write the condition-code register via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* WRASI: write the 8-bit default ASI. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* WRFPRS: write the FP register state; invalidates cached fprs_dirty. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    /* End TB so that translation re-checks FPRS enable bits. */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* WRGSR: write the graphics status register; requires FPU enabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* WRSOFTINT_SET: set bits in the soft interrupt register. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* WRSOFTINT_CLR: clear bits in the soft interrupt register. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* WRSOFTINT: overwrite the soft interrupt register. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

/* WR %tick_cmpr: store the compare value and reprogram the tick timer. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3185 
/* WRSTICK: set the system tick counter value. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* WR %stick_cmpr: store the compare value and reprogram the stick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

/*
 * WRPOWERDOWN: halt the cpu.  State is saved first because the
 * helper raises an exception to exit the cpu loop.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/* WRPSR (sparc32): write the processor state register via helper. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    /* End TB: PSR changes can affect subsequent translation. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* WRWIM (sparc32): write the window invalid mask, limited to NWINDOWS bits. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3243 
/* WRPR %tpc: write the trap PC of the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    /* r_tsptr = &env->ts[env->tl], the active trap_state entry. */
    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* WRPR %tnpc: write the trap next-PC of the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* WRPR %tstate: write the saved processor state of the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* WRPR %tt: write the trap type (32-bit field). */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

/* WRPR %tick: set the tick counter value. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3312 
/* WRPR %tba: write the trap base address (kept in global cpu_tbr). */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

/*
 * WRPR %pstate: write the processor state via helper.  State is saved
 * and npc made dynamic because the helper may change translation-
 * relevant bits (and the TB ends if icount is active).
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* WRPR %tl: write the trap level; npc made dynamic for the state change. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* WRPR %pil: write the processor interrupt level via helper. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* WRPR %cwp: write the current window pointer via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* WRPR %cansave: write the SAVE-without-spill window count. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* WRPR %canrestore: write the RESTORE-without-fill window count. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* WRPR %cleanwin: write the clean window count. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* WRPR %otherwin: write the "other" address-space window count. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* WRPR %wstate: write the window state register. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* WRPR %gl: write the global register level via helper (UA2005 GL). */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3399 
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* WRTBR (sparc32) shares the %tba writer above. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

/* WRHPR %hpstate: write the hypervisor state; end TB for the mode change. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/*
 * WRHPR %htstate: write the hypervisor trap state for the current
 * trap level.  The address &env->htstate[tl] is formed by hand:
 * tl is masked to MAXTL_MASK and scaled by 8 (shift by 3), the
 * stride of the htstate array entries.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* WRHPR %hintp: write the hypervisor interrupt pending register. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* WRHPR %htba: write the hypervisor trap base address. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* WRHPR %hstick_cmpr: store compare value and reprogram the hstick timer. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3462 
3463 static bool do_saved_restored(DisasContext *dc, bool saved)
3464 {
3465     if (!supervisor(dc)) {
3466         return raise_priv(dc);
3467     }
3468     if (saved) {
3469         gen_helper_saved(tcg_env);
3470     } else {
3471         gen_helper_restored(tcg_env);
3472     }
3473     return advance_pc(dc);
3474 }
3475 
3476 TRANS(SAVED, 64, do_saved_restored, true)
3477 TRANS(RESTORED, 64, do_saved_restored, false)
3478 
/* NOP: no operation, just advance the pc/npc pair. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3490 
/*
 * Generic expander for register/register-or-immediate arithmetic.
 * FUNC is the three-operand generator; FUNCI, if non-NULL, is used
 * for the immediate form.  When LOGIC_CC is set, the result is
 * computed directly into cpu_cc_N and the remaining flags are
 * derived from it (Z mirrors the result, C and V are cleared),
 * before the result is also stored to rd.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* For logic ops, the result itself is the N (and Z) source. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Also update the 32-bit (icc) flag copies on sparc64. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3533 
3534 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3535                      void (*func)(TCGv, TCGv, TCGv),
3536                      void (*funci)(TCGv, TCGv, target_long),
3537                      void (*func_cc)(TCGv, TCGv, TCGv))
3538 {
3539     if (a->cc) {
3540         return do_arith_int(dc, a, func_cc, NULL, false);
3541     }
3542     return do_arith_int(dc, a, func, funci, false);
3543 }
3544 
/*
 * Dispatch a logic insn: the result itself provides the flags, so
 * the cc bit maps directly onto do_arith_int's logic_cc path.
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVX, 64, do_arith, a, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3580 
/*
 * OR: handled specially so that the common "mov" idiom
 * (or %g0, src, rd) avoids the generic flag machinery; all other
 * cases fall through to the normal logic-op expander.
 */
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov simm13 (or %g0 | 0): store the constant directly. */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov reg: copy rs2 to rd. */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3597 
3598 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3599                      int width, bool cc, bool left)
3600 {
3601     TCGv dst, s1, s2, lo1, lo2;
3602     uint64_t amask, tabl, tabr;
3603     int shift, imask, omask;
3604 
3605     dst = gen_dest_gpr(dc, a->rd);
3606     s1 = gen_load_gpr(dc, a->rs1);
3607     s2 = gen_load_gpr(dc, a->rs2);
3608 
3609     if (cc) {
3610         gen_op_subcc(cpu_cc_N, s1, s2);
3611     }
3612 
3613     /*
3614      * Theory of operation: there are two tables, left and right (not to
3615      * be confused with the left and right versions of the opcode).  These
3616      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3617      * these tables are loaded into two constants, TABL and TABR below.
3618      * The operation index = (input & imask) << shift calculates the index
3619      * into the constant, while val = (table >> index) & omask calculates
3620      * the value we're looking for.
3621      */
3622     switch (width) {
3623     case 8:
3624         imask = 0x7;
3625         shift = 3;
3626         omask = 0xff;
3627         if (left) {
3628             tabl = 0x80c0e0f0f8fcfeffULL;
3629             tabr = 0xff7f3f1f0f070301ULL;
3630         } else {
3631             tabl = 0x0103070f1f3f7fffULL;
3632             tabr = 0xfffefcf8f0e0c080ULL;
3633         }
3634         break;
3635     case 16:
3636         imask = 0x6;
3637         shift = 1;
3638         omask = 0xf;
3639         if (left) {
3640             tabl = 0x8cef;
3641             tabr = 0xf731;
3642         } else {
3643             tabl = 0x137f;
3644             tabr = 0xfec8;
3645         }
3646         break;
3647     case 32:
3648         imask = 0x4;
3649         shift = 0;
3650         omask = 0x3;
3651         if (left) {
3652             tabl = (2 << 2) | 3;
3653             tabr = (3 << 2) | 1;
3654         } else {
3655             tabl = (1 << 2) | 3;
3656             tabr = (3 << 2) | 2;
3657         }
3658         break;
3659     default:
3660         abort();
3661     }
3662 
3663     lo1 = tcg_temp_new();
3664     lo2 = tcg_temp_new();
3665     tcg_gen_andi_tl(lo1, s1, imask);
3666     tcg_gen_andi_tl(lo2, s2, imask);
3667     tcg_gen_shli_tl(lo1, lo1, shift);
3668     tcg_gen_shli_tl(lo2, lo2, shift);
3669 
3670     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3671     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3672     tcg_gen_andi_tl(lo1, lo1, omask);
3673     tcg_gen_andi_tl(lo2, lo2, omask);
3674 
3675     amask = address_mask_i(dc, -8);
3676     tcg_gen_andi_tl(s1, s1, amask);
3677     tcg_gen_andi_tl(s2, s2, amask);
3678 
3679     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3680     tcg_gen_and_tl(lo2, lo2, lo1);
3681     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3682 
3683     gen_store_gpr(dc, a->rd, dst);
3684     return advance_pc(dc);
3685 }
3686 
3687 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3688 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3689 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3690 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3691 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3692 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3693 
3694 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3695 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3696 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3697 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3698 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3699 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3700 
/* Generic expander for three-register VIS ops: rd = func(rs1, rs2). */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3716 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the discarded low 3 bits of the
 * sum are recorded in GSR.align for use by FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/*
 * ALIGNADDRESS_LITTLE: as above, but GSR.align receives the negated
 * low bits of the sum (little-endian variant).
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/*
 * BMASK (VIS2): dst = s1 + s2, with the low 32 bits of the sum also
 * stored into the high half of GSR (the bmask field).
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3758 
/*
 * Register-count shifts (SLL/SRL/SRA and the X variants).
 * L selects left shift, U selects unsigned (logical) right shift;
 * a->x selects the 64-bit form with a 6-bit count.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* Mask the count to 5 or 6 bits in a temp; rs2 itself is untouched. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /*
             * NOTE(review): this zero-extends the 32-bit SLL result;
             * the V9 manual says SLL shifts all 64 bits with a 5-bit
             * count (only SRL/SRA operate on the low 32) -- confirm
             * this truncation is intended.
             */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* SRL operates on the zero-extended low 32 bits. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* SRA operates on the sign-extended low 32 bits. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3798 
/*
 * Immediate-count shifts.  On sparc32 (or the X forms) a plain shift
 * suffices; for the 32-bit forms on sparc64 the deposit/extract ops
 * combine the shift with the 32-bit zero/sign extension.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        if (l) {
            /*
             * NOTE(review): deposit_z truncates the SLL result to
             * 32 bits (zero-extended); the V9 manual says SLL shifts
             * all 64 bits with a 5-bit count -- confirm intended.
             */
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3835 
3836 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3837 {
3838     /* For simplicity, we under-decoded the rs2 form. */
3839     if (!imm && rs2_or_imm & ~0x1f) {
3840         return NULL;
3841     }
3842     if (imm || rs2_or_imm == 0) {
3843         return tcg_constant_tl(rs2_or_imm);
3844     } else {
3845         return cpu_regs[rs2_or_imm];
3846     }
3847 }
3848 
/*
 * Common tail for MOVcc/MOVfcc/MOVR: conditionally move src2 into rd.
 * The current value of rd is loaded first so the movcond can keep it
 * when the condition is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}

/* MOVcc: move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVfcc: move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVR: move on the contents of register rs1 (compare against zero). */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
3893 
/*
 * Common expander for instructions whose operand is the address-style
 * sum rs1 + rs2/simm13 (JMPL, RETT, RETURN, SAVE, RESTORE).  The sum
 * is handed to FUNC along with the destination register number.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
3918 
/*
 * JMPL: jump to src, leaving the address of the JMPL in rd.
 * The target must be 4-byte aligned (gen_check_align traps otherwise).
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

/*
 * RETT (sparc32): return from trap to src; privileged.
 * The helper performs the PSR/window updates and may raise traps,
 * hence npc becomes fully dynamic.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
3957 
/*
 * RETURN (sparc64 only): jump to @src and restore the caller's
 * register window.  The @rd operand is unused by this instruction.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
3972 
/*
 * SAVE: advance to a new register window, then write the sum computed
 * in the *old* window (already captured in @src by do_add_special)
 * into %rd of the *new* window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
3981 
/*
 * RESTORE: return to the previous register window, then write the sum
 * computed in the old window (captured in @src) into %rd of the
 * restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
3990 
/*
 * DONE/RETRY (sparc64 only): privileged return from a trap handler.
 * The helper reloads pc/npc from the trap state, so the translator
 * marks both as dynamic and ends the TB.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* The helpers may touch timers/irq state; see translator_io_start. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4009 
4010 /*
4011  * Major opcode 11 -- load and store instructions
4012  */
4013 
/*
 * Compute the effective address rs1 + rs2_or_imm for a load/store.
 * Returns NULL for the under-decoded rs2 form (caller must then
 * return false -> illegal instruction).  When the 32-bit address
 * mask is in effect (AM_CHECK), the result is zero-extended to
 * 32 bits.  The result is either the rs1 register itself or a
 * fresh temporary; callers must not modify it in place.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        /* Nonzero offset: materialize the sum in a temporary. */
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* 32-bit address masking; reuse the temporary if we have one. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4042 
/*
 * Shared translator for integer loads (LDUB/LDUH/LDUW/LDSB/LDSH/LDSW/LDX):
 * load a value of size/sign @mop from [rs1 + rs2_or_imm] through the
 * instruction's ASI into %rd.
 */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4066 
/*
 * Shared translator for integer stores (STB/STH/STW/STX): store %rd of
 * size @mop to [rs1 + rs2_or_imm] through the instruction's ASI.
 */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4086 
/*
 * LDD: load a doubleword into the even/odd register pair %rd/%rd+1.
 * An odd rd is illegal per the architecture, hence the rd & 1 reject.
 */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4103 
/*
 * STD: store the even/odd register pair %rd/%rd+1 as a doubleword.
 * An odd rd is illegal per the architecture, hence the rd & 1 reject.
 */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4120 
/*
 * LDSTUB: atomic load-store-unsigned-byte.  Loads the byte at the
 * effective address into %rd and stores 0xff to memory.
 */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4137 
/*
 * SWAP: atomically exchange the 32-bit word in %rd with the word at
 * the effective address; the old memory value lands in %rd.
 */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4155 
/*
 * CASA/CASXA: compare-and-swap at [rs1] (no offset; note the fixed
 * imm=true, rs2_or_imm=0 arguments to gen_ldst_addr).  Here
 * rs2_or_imm names the register holding the compare value; the swap-in
 * value comes from %rd, and the old memory value is written back to %rd.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);        /* old value from memory */
    n = gen_load_gpr(dc, a->rd);        /* new value to store */
    c = gen_load_gpr(dc, a->rs2_or_imm); /* comparison value */
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4177 
/*
 * Shared translator for FP loads (LDF/LDDF/LDQF and their ASI forms):
 * load @sz bits from the effective address into FP register %rd.
 * Traps if the FPU is disabled, and for MO_128 if the cpu lacks
 * float128 support.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4205 
/*
 * Shared translator for FP stores (STF/STDF/STQF and their ASI forms):
 * store @sz bits from FP register %rd to the effective address.
 * Traps if the FPU is disabled, and for MO_128 if the cpu lacks
 * float128 support.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4232 
/*
 * STDFQ (sparc32 only, privileged): store double from FP queue.
 * This implementation has no FP queue, so the instruction always
 * raises a sequence-error FP exception.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4247 
/*
 * LDFSR/LDXFSR: load the FP state register from memory.  Only the
 * bits in @new_mask are taken from memory; @old_mask selects the FSR
 * bits that are preserved.  The helper then reacts to the new value
 * (e.g. rounding-mode changes).
 */
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    /* Merge the loadable bits into the preserved FSR bits. */
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4269 
/*
 * STFSR/STXFSR: store the FP state register to memory (32 or 64 bits).
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4285 
4286 static bool do_fc(DisasContext *dc, int rd, bool c)
4287 {
4288     uint64_t mask;
4289 
4290     if (gen_trap_ifnofpu(dc)) {
4291         return true;
4292     }
4293 
4294     if (rd & 1) {
4295         mask = MAKE_64BIT_MASK(0, 32);
4296     } else {
4297         mask = MAKE_64BIT_MASK(32, 32);
4298     }
4299     if (c) {
4300         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4301     } else {
4302         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4303     }
4304     gen_update_fprs_dirty(dc, rd);
4305     return advance_pc(dc);
4306 }
4307 
4308 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4309 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4310 
/*
 * FZEROd/FONEd (VIS1): set a double-precision register to the constant
 * @c (0 or -1, i.e. all-zeros or all-ones).
 */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4324 
4325 static bool do_ff(DisasContext *dc, arg_r_r *a,
4326                   void (*func)(TCGv_i32, TCGv_i32))
4327 {
4328     TCGv_i32 tmp;
4329 
4330     if (gen_trap_ifnofpu(dc)) {
4331         return true;
4332     }
4333 
4334     tmp = gen_load_fpr_F(dc, a->rs);
4335     func(tmp, tmp);
4336     gen_store_fpr_F(dc, a->rd, tmp);
4337     return advance_pc(dc);
4338 }
4339 
4340 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4341 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4342 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4343 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4344 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4345 
/*
 * Shared translator for VIS ops with a 64-bit source and a 32-bit
 * destination (FPACK16/FPACKFIX): %f[rd] = func(%d[rs]).
 */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4365 
/*
 * Shared translator for single-precision unary ops that can raise IEEE
 * exceptions (helper takes tcg_env): clear pending excp/FTT, run the
 * helper, then check for and deliver IEEE exceptions.
 */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4386 
/*
 * Shared translator for exception-raising conversions from a 64-bit
 * source to a 32-bit destination (FdTOs/FdTOi/FxTOs).
 */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4409 
/*
 * Shared translator for double-precision unary ops that cannot raise
 * IEEE exceptions (move/negate/abs/not): %d[rd] = func(%d[rs]).
 */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4431 
/*
 * Shared translator for exception-raising double-precision unary ops
 * (FSQRTd, FxTOd, FdTOx): clear pending excp/FTT, run the helper,
 * then check for and deliver IEEE exceptions.
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4453 
/*
 * Shared translator for exception-raising conversions from a 32-bit
 * source to a 64-bit destination (FiTOd/FsTOd/FsTOx).
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4476 
/*
 * FMOVq (sparc64 only): copy a quad-precision register as two 64-bit
 * halves of the cpu_fpr[] array.
 */
static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
{
    int rd, rs;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    rd = QFPREG(a->rd);
    rs = QFPREG(a->rs);
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}
4499 
/*
 * Shared translator for non-trapping quad-precision unary ops
 * (FNEGq/FABSq).  Quad values travel through the QT0/QT1 staging
 * slots in env: load rs into QT1, run the helper, store QT0 to rd.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
4520 
/*
 * Shared translator for exception-raising quad-precision unary ops
 * (FSQRTq).  Same QT0/QT1 staging as do_qq, plus an IEEE exception
 * check after the helper runs.
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4541 
/*
 * Shared translator for exception-raising conversions from quad to a
 * 32-bit result (FqTOs/FqTOi): quad source staged via QT1.
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env))
{
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_F(dc);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4565 
/*
 * Shared translator for exception-raising conversions from quad to a
 * 64-bit result (FqTOd/FqTOx): quad source staged via QT1.
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env))
{
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4589 
/*
 * Shared translator for conversions from a 32-bit source to quad
 * (FiTOq/FsTOq): the helper leaves its quad result in QT0.
 * Note: no IEEE exception check here — widening to quad is exact.
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i32))
{
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_F(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4612 
/*
 * Shared translator for conversions from a 64-bit source to quad
 * (FdTOq/FxTOq): the helper leaves its quad result in QT0.
 * Note: no IEEE exception check here — widening to quad is exact.
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i64))
{
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_D(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4635 
4636 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4637                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4638 {
4639     TCGv_i32 src1, src2;
4640 
4641     if (gen_trap_ifnofpu(dc)) {
4642         return true;
4643     }
4644 
4645     src1 = gen_load_fpr_F(dc, a->rs1);
4646     src2 = gen_load_fpr_F(dc, a->rs2);
4647     func(src1, src1, src2);
4648     gen_store_fpr_F(dc, a->rd, src1);
4649     return advance_pc(dc);
4650 }
4651 
4652 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4653 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4654 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4655 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4656 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4657 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4658 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4659 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4660 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4661 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4662 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4663 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4664 
/*
 * Shared translator for exception-raising single-precision binary ops
 * (FADDs/FSUBs/FMULs/FDIVs): clear pending excp/FTT, run the helper,
 * then check for and deliver IEEE exceptions.
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4687 
/*
 * Shared translator for non-trapping 64-bit binary ops (VIS multiplies,
 * partitioned add/sub, logicals, pack/merge/align/shuffle):
 * %d[rd] = func(%d[rs1], %d[rs2]).
 */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4731 
/*
 * Shared translator for VIS pixel-compare ops: two 64-bit FP sources,
 * result written to integer register %rd.
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4759 
/*
 * Shared translator for exception-raising double-precision binary ops
 * (FADDd/FSUBd/FMULd/FDIVd).
 */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4783 
/*
 * FsMULd: multiply two single-precision sources into a double-precision
 * result.  Raises fp_exception (unimplemented FPop) on CPU models
 * lacking the FSMULD feature.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
4805 
/*
 * Shared translator for 64-bit ops that also read the old destination
 * value (currently only PDIST, which accumulates into %rd):
 * %d[rd] = func(%d[rd], %d[rs1], %d[rs2]).
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4825 
/*
 * Shared translator for exception-raising quad-precision binary ops
 * (FADDq/FSUBq/FMULq/FDIVq): operands staged through QT0/QT1,
 * result returned in QT0.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4850 
/*
 * FdMULq: multiply two double-precision sources into a quad-precision
 * result (left by the helper in QT0, then stored to %rd).
 */
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_fdmulq(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}
4871 
/*
 * FMOVRs/FMOVRd/FMOVRq (V9): conditionally move FP register %rs2 to
 * %rd based on a register-vs-zero comparison of integer register %rs1.
 * @func performs the size-specific conditional move.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
4893 
/*
 * FMOVcc on integer condition codes (V9): conditionally move FP
 * register %rs2 to %rd based on the %icc/%xcc condition (a->cc).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
4915 
/*
 * FMOVcc on FP condition codes (V9): conditionally move FP register
 * %rs2 to %rd based on the %fccN condition (a->cc).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
4937 
/*
 * FCMPs/FCMPEs: single-precision compare, setting %fcc[a->cc].
 * The @e variant (FCMPEs) also signals on quiet NaNs.  Only %fcc0
 * exists on sparc32, hence the avail_32 cc check.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_op_fcmpes(a->cc, src1, src2);
    } else {
        gen_op_fcmps(a->cc, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
4962 
4963 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
4964 {
4965     TCGv_i64 src1, src2;
4966 
4967     if (avail_32(dc) && a->cc != 0) {
4968         return false;
4969     }
4970     if (gen_trap_ifnofpu(dc)) {
4971         return true;
4972     }
4973 
4974     gen_op_clear_ieee_excp_and_FTT();
4975     src1 = gen_load_fpr_D(dc, a->rs1);
4976     src2 = gen_load_fpr_D(dc, a->rs2);
4977     if (e) {
4978         gen_op_fcmped(a->cc, src1, src2);
4979     } else {
4980         gen_op_fcmpd(a->cc, src1, src2);
4981     }
4982     return advance_pc(dc);
4983 }
4984 
/* Double-precision compares; the E form uses the fcmped helper. */
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
4987 
4988 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
4989 {
4990     if (avail_32(dc) && a->cc != 0) {
4991         return false;
4992     }
4993     if (gen_trap_ifnofpu(dc)) {
4994         return true;
4995     }
4996     if (gen_trap_float128(dc)) {
4997         return true;
4998     }
4999 
5000     gen_op_clear_ieee_excp_and_FTT();
5001     gen_op_load_fpr_QT0(QFPREG(a->rs1));
5002     gen_op_load_fpr_QT1(QFPREG(a->rs2));
5003     if (e) {
5004         gen_op_fcmpeq(a->cc);
5005     } else {
5006         gen_op_fcmpq(a->cc);
5007     }
5008     return advance_pc(dc);
5009 }
5010 
/* Quad-precision compares; the E form uses the fcmpeq helper. */
TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5013 
/*
 * Initialize the SPARC-specific translation state for a new TB from the
 * flags and cs_base that were packed when the TB was looked up.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The npc of the first insn is carried in the TB's cs_base field. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* Default ASI for alternate-space accesses, from the TB flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5043 
/* No per-TB prologue is needed for SPARC; the hook is intentionally empty. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5047 
/*
 * Record (pc, npc) for this insn in the insn_start op so that
 * sparc_restore_state_to_opc can rebuild them after an exception.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    /*
     * An npc with the low two bits set is a symbolic tag (JUMP_PC,
     * DYNAMIC_PC, DYNAMIC_PC_LOOKUP), not a real address; canonicalize
     * it before recording.
     */
    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /*
             * Conditional branch with two possible targets: jump_pc[1]
             * must be the fallthrough (pc + 4), so only jump_pc[0] needs
             * to be encoded alongside the tag.
             */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic variants are restored the same way. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5069 
5070 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5071 {
5072     DisasContext *dc = container_of(dcbase, DisasContext, base);
5073     CPUSPARCState *env = cpu_env(cs);
5074     unsigned int insn;
5075 
5076     insn = translator_ldl(env, &dc->base, dc->pc);
5077     dc->base.pc_next += 4;
5078 
5079     if (!decode(dc, insn)) {
5080         gen_exception(dc, TT_ILL_INSN);
5081     }
5082 
5083     if (dc->base.is_jmp == DISAS_NORETURN) {
5084         return;
5085     }
5086     if (dc->pc != dc->base.pc_next) {
5087         dc->base.is_jmp = DISAS_TOO_MANY;
5088     }
5089 }
5090 
/*
 * Emit the end-of-TB code: chain directly when both pc and npc are known
 * at translate time, otherwise flush them to the CPU state and exit; then
 * emit the out-of-line stubs for any exceptions deferred during the TB.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* Dynamic pc/npc: decide between goto_ptr lookup and full exit. */
        may_lookup = true;
        if (dc->pc & 3) {
            /*
             * pc holds a symbolic tag; cpu_pc was presumably already
             * written by the code that made it dynamic (note the else
             * branch stores it only for the static case).
             */
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the two-way branch target into cpu_pc/cpu_npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Out-of-line code for exceptions raised from within delay slots. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* A misaligned e->npc is a tag; leave the stored cpu_npc alone. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5170 
5171 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5172                                CPUState *cpu, FILE *logfile)
5173 {
5174     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5175     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5176 }
5177 
/* Hooks consumed by the generic translator loop (translator_loop). */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5186 
/* Entry point: translate one TB by running the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5194 
5195 void sparc_tcg_init(void)
5196 {
5197     static const char gregnames[32][4] = {
5198         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5199         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5200         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5201         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5202     };
5203     static const char fregnames[32][4] = {
5204         "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5205         "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5206         "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5207         "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5208     };
5209 
5210     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5211 #ifdef TARGET_SPARC64
5212         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5213         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5214         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5215 #endif
5216         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5217         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5218         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5219         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5220         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5221         { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5222         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5223         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5224         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5225         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5226     };
5227 
5228     unsigned int i;
5229 
5230     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5231                                          offsetof(CPUSPARCState, regwptr),
5232                                          "regwptr");
5233 
5234     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5235         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5236     }
5237 
5238     cpu_regs[0] = NULL;
5239     for (i = 1; i < 8; ++i) {
5240         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5241                                          offsetof(CPUSPARCState, gregs[i]),
5242                                          gregnames[i]);
5243     }
5244 
5245     for (i = 8; i < 32; ++i) {
5246         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5247                                          (i - 8) * sizeof(target_ulong),
5248                                          gregnames[i]);
5249     }
5250 
5251     for (i = 0; i < TARGET_DPREGS; i++) {
5252         cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5253                                             offsetof(CPUSPARCState, fpr[i]),
5254                                             fregnames[i]);
5255     }
5256 
5257 #ifdef TARGET_SPARC64
5258     cpu_fprs = tcg_global_mem_new_i32(tcg_env,
5259                                       offsetof(CPUSPARCState, fprs), "fprs");
5260 #endif
5261 }
5262 
5263 void sparc_restore_state_to_opc(CPUState *cs,
5264                                 const TranslationBlock *tb,
5265                                 const uint64_t *data)
5266 {
5267     SPARCCPU *cpu = SPARC_CPU(cs);
5268     CPUSPARCState *env = &cpu->env;
5269     target_ulong pc = data[0];
5270     target_ulong npc = data[1];
5271 
5272     env->pc = pc;
5273     if (npc == DYNAMIC_PC) {
5274         /* dynamic NPC: already stored */
5275     } else if (npc & JUMP_PC) {
5276         /* jump PC: use 'cond' and the jump targets of the translation */
5277         if (env->cond) {
5278             env->npc = npc & ~3;
5279         } else {
5280             env->npc = pc + 4;
5281         }
5282     } else {
5283         env->npc = npc;
5284     }
5285 }
5286