xref: /openbmc/qemu/target/sparc/translate.c (revision 09b157e6283d02e02ec9f47d8d4a2fd0cd8612ce)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
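
/*
 * Worked example (illustrative): the rd field of a SPARC instruction
 * occupies bits 29..25.  In the manuals' bit order that is
 * GET_FIELD_SP(insn, 25, 29), i.e. (insn >> 25) & 0x1f; in the
 * inverted order used by GET_FIELD it is GET_FIELD(insn, 2, 6).
 */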

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

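/*
 * The 32 single-precision registers are packed two per CPU_DoubleU
 * slot: %f(2n) lives in the upper 32 bits of fpr[n] and %f(2n+1) in
 * the lower 32 bits, matching the architectural pairing of single
 * registers into doubles.
 */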
static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

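/*
 * With V9 PSTATE.AM (32-bit address masking) in effect, effective
 * addresses are truncated to 32 bits before use.
 */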
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

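/* %g0 always reads as zero; a read of reg 0 returns a fresh zero temp. */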
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

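/*
 * Chain directly to the next TB when both pc and npc are static and
 * reachable via goto_tb; otherwise store pc/npc and look the target
 * TB up dynamically.
 */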
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

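/*
 * On a 64-bit target the 32-bit carry is cached in bit 32 of
 * cpu_icc_C (the carry-in to bit 32 of the result), so extract it
 * down to bit 0; a 32-bit target holds the carry in cpu_icc_C
 * directly.
 */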
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

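/*
 * Compute the flags for ADDcc and friends.  Signed overflow is
 * derived without branches: for R = S1 + S2,
 * V = (R ^ S2) & ~(S1 ^ S2), which is set only when both operands
 * share a sign and the result differs.  E.g. 0x7fffffff + 1 =
 * 0x80000000 sets V: S1 ^ S2 has the sign bit clear while R ^ S2
 * has it set.
 */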
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

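/*
 * MULScc performs one step of a 32-bit multiply: if the low bit of %y
 * is clear, src2 is replaced by zero; %y shifts right one bit, taking
 * the low bit of src1 into its top bit; and src1 shifts right one
 * bit, taking N ^ V (the sign of the previous partial sum) into
 * bit 31.  The final add also updates the condition codes.
 */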
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

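/*
 * UMUL/SMUL form the full 64-bit product of the low 32 bits of each
 * operand and leave the high 32 bits in %y.  E.g. an unsigned
 * multiply of 0x80000000 by itself gives 0x4000000000000000, so %y
 * becomes 0x40000000.
 */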
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

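/*
 * Saturating add/sub on two 16-bit lanes: each lane is sign-extended
 * to 32 bits, combined, then clamped to [INT16_MIN, INT16_MAX] before
 * repacking.  E.g. 0x7fff + 0x0001 saturates to 0x7fff.
 */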
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

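/*
 * Branchless 32-bit saturation: on signed overflow (v < 0), replace
 * the result with INT32_MAX + (r >= 0), which is INT32_MAX when the
 * wrapped result went negative and INT32_MIN when it went positive.
 */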
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

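/*
 * FALIGNDATA concatenates s1:s2 into a 16-byte value and extracts the
 * 8 bytes starting at the byte offset held in GSR.align (GSR bits
 * 2:0).
 */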
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* Call this function before using the condition register as it may
   have been set for a jump. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

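/*
 * Queue an exception to be raised out of line: the conditional branch
 * to e->lab keeps the fast path free of the raise, while the saved
 * pc/npc preserve the state of the insn that triggered the check.
 */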
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

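/*
 * Decode the 4-bit integer condition field: bits 2:0 select one of
 * eight primitive tests on the N/Z/V/C flags, and bit 3 inverts the
 * sense, so e.g. cond 0x9 (bne) is the inverse of cond 0x1 (be).
 */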
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

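/*
 * Decode the 3-bit register condition field used by BPr/MOVr/FMOVr:
 * bits 1:0 select a signed comparison of the register against zero,
 * bit 2 inverts it, and encodings 0 and 4 are reserved.
 */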
static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

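/*
 * ASI (Address Space Identifier) loads and stores: the 8-bit ASI
 * selects which address space and access mode a memory operation
 * targets.  Common cases are lowered below to direct TCG memory ops
 * on a suitable mem_idx; everything else falls back to the
 * ld_asi/st_asi helpers.
 */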
/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

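/*
 * Illustrative mapping (derived from resolve_asi above): a v9
 * "lda [%o0] ASI_P, %o1" resolves to GET_ASI_DIRECT and becomes a
 * single aligned guest load, while an unhandled ASI takes the default
 * helper path with the ASI number and memop passed at run time.
 */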
1683 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1684 {
1685     switch (da->type) {
1686     case GET_ASI_EXCP:
1687         break;
1688     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1689         gen_exception(dc, TT_ILL_INSN);
1690         break;
1691     case GET_ASI_DIRECT:
1692         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1693         break;
1694 
1695     case GET_ASI_CODE:
1696 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1697         {
1698             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1699             TCGv_i64 t64 = tcg_temp_new_i64();
1700 
1701             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1702             tcg_gen_trunc_i64_tl(dst, t64);
1703         }
1704         break;
1705 #else
1706         g_assert_not_reached();
1707 #endif
1708 
1709     default:
1710         {
1711             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1712             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1713 
1714             save_state(dc);
1715 #ifdef TARGET_SPARC64
1716             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1717 #else
1718             {
1719                 TCGv_i64 t64 = tcg_temp_new_i64();
1720                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1721                 tcg_gen_trunc_i64_tl(dst, t64);
1722             }
1723 #endif
1724         }
1725         break;
1726     }
1727 }
1728 
1729 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1730 {
1731     switch (da->type) {
1732     case GET_ASI_EXCP:
1733         break;
1734 
1735     case GET_ASI_DTWINX: /* Reserved for stda.  */
1736         if (TARGET_LONG_BITS == 32) {
1737             gen_exception(dc, TT_ILL_INSN);
1738             break;
1739         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1740             /* Pre-OpenSPARC CPUs don't have these */
1741             gen_exception(dc, TT_ILL_INSN);
1742             break;
1743         }
1744         /* In OpenSPARC T1+ CPUs, TWINX ASIs used in stores are the ST_BLKINIT_ ASIs */
1745         /* fall through */
1746 
1747     case GET_ASI_DIRECT:
1748         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1749         break;
1750 
1751     case GET_ASI_BCOPY:
1752         assert(TARGET_LONG_BITS == 32);
1753         /*
1754          * Copy 32 bytes from the address in SRC to ADDR.
1755          *
1756          * From Ross RT625 hyperSPARC manual, section 4.6:
1757          * "Block Copy and Block Fill will work only on cache line boundaries."
1758          *
1759          * It does not specify whether an unaligned address is truncated or trapped.
1760          * Previous qemu behaviour was to truncate to 4 byte alignment, which
1761          * is obviously wrong.  The only place I can see this used is in the
1762          * Linux kernel, which begins with page alignment, advancing by 32,
1763          * so the address is always aligned.  Assume truncation as the simpler option.
1764          *
1765          * Since the loads and stores are paired, allow the copy to happen
1766          * in the host endianness.  The copy need not be atomic.
1767          */
1768         {
1769             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1770             TCGv saddr = tcg_temp_new();
1771             TCGv daddr = tcg_temp_new();
1772             TCGv_i128 tmp = tcg_temp_new_i128();
1773 
1774             tcg_gen_andi_tl(saddr, src, -32);
1775             tcg_gen_andi_tl(daddr, addr, -32);
1776             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1777             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1778             tcg_gen_addi_tl(saddr, saddr, 16);
1779             tcg_gen_addi_tl(daddr, daddr, 16);
1780             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1781             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1782         }
1783         break;
1784 
1785     default:
1786         {
1787             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1788             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1789 
1790             save_state(dc);
1791 #ifdef TARGET_SPARC64
1792             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1793 #else
1794             {
1795                 TCGv_i64 t64 = tcg_temp_new_i64();
1796                 tcg_gen_extu_tl_i64(t64, src);
1797                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1798             }
1799 #endif
1800 
1801             /* A write to a TLB register may alter page maps.  End the TB. */
1802             dc->npc = DYNAMIC_PC;
1803         }
1804         break;
1805     }
1806 }
1807 
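/* SWAP(A): atomically exchange a register with a word in memory. */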
1808 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1809                          TCGv dst, TCGv src, TCGv addr)
1810 {
1811     switch (da->type) {
1812     case GET_ASI_EXCP:
1813         break;
1814     case GET_ASI_DIRECT:
1815         tcg_gen_atomic_xchg_tl(dst, addr, src,
1816                                da->mem_idx, da->memop | MO_ALIGN);
1817         break;
1818     default:
1819         /* ??? Should be DAE_invalid_asi.  */
1820         gen_exception(dc, TT_DATA_ACCESS);
1821         break;
1822     }
1823 }
1824 
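/*
 * CASA/CASXA: compare-and-swap.  The direct case maps straight onto the
 * TCG atomic cmpxchg; other ASIs are rejected with a data access
 * exception rather than implemented out of line.
 */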
1825 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1826                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1827 {
1828     switch (da->type) {
1829     case GET_ASI_EXCP:
1830         return;
1831     case GET_ASI_DIRECT:
1832         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1833                                   da->mem_idx, da->memop | MO_ALIGN);
1834         break;
1835     default:
1836         /* ??? Should be DAE_invalid_asi.  */
1837         gen_exception(dc, TT_DATA_ACCESS);
1838         break;
1839     }
1840 }
1841 
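/*
 * LDSTUB(A): atomically load a byte and set the memory location to 0xff,
 * i.e. the classic SPARC test-and-set primitive.
 */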
1842 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1843 {
1844     switch (da->type) {
1845     case GET_ASI_EXCP:
1846         break;
1847     case GET_ASI_DIRECT:
1848         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1849                                da->mem_idx, MO_UB);
1850         break;
1851     default:
1852         /* ??? In theory, this should raise DAE_invalid_asi.
1853            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1854         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1855             gen_helper_exit_atomic(tcg_env);
1856         } else {
1857             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1858             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1859             TCGv_i64 s64, t64;
1860 
1861             save_state(dc);
1862             t64 = tcg_temp_new_i64();
1863             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1864 
1865             s64 = tcg_constant_i64(0xff);
1866             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1867 
1868             tcg_gen_trunc_i64_tl(dst, t64);
1869 
1870             /* End the TB.  */
1871             dc->npc = DYNAMIC_PC;
1872         }
1873         break;
1874     }
1875 }
1876 
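/*
 * Floating-point alternate-space load (ldfa/lddfa/ldqfa).  Besides the
 * direct case this covers the 64-byte block-load ASIs, which fill eight
 * consecutive double registers, and the VIS "short" 8/16-bit loads.
 */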
1877 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1878                         TCGv addr, int rd)
1879 {
1880     MemOp memop = da->memop;
1881     MemOp size = memop & MO_SIZE;
1882     TCGv_i32 d32;
1883     TCGv_i64 d64, l64;
1884     TCGv addr_tmp;
1885 
1886     /* TODO: Use 128-bit load/store below. */
1887     if (size == MO_128) {
1888         memop = (memop & ~MO_SIZE) | MO_64;
1889     }
1890 
1891     switch (da->type) {
1892     case GET_ASI_EXCP:
1893         break;
1894 
1895     case GET_ASI_DIRECT:
1896         memop |= MO_ALIGN_4;
1897         switch (size) {
1898         case MO_32:
1899             d32 = tcg_temp_new_i32();
1900             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1901             gen_store_fpr_F(dc, rd, d32);
1902             break;
1903 
1904         case MO_64:
1905             d64 = tcg_temp_new_i64();
1906             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1907             gen_store_fpr_D(dc, rd, d64);
1908             break;
1909 
1910         case MO_128:
1911             d64 = tcg_temp_new_i64();
1912             l64 = tcg_temp_new_i64();
1913             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1914             addr_tmp = tcg_temp_new();
1915             tcg_gen_addi_tl(addr_tmp, addr, 8);
1916             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
1917             gen_store_fpr_D(dc, rd, d64);
1918             gen_store_fpr_D(dc, rd + 2, l64);
1919             break;
1920         default:
1921             g_assert_not_reached();
1922         }
1923         break;
1924 
1925     case GET_ASI_BLOCK:
1926         /* Valid for lddfa on aligned registers only.  */
1927         if (orig_size == MO_64 && (rd & 7) == 0) {
1928             /* The first operation checks required alignment.  */
1929             addr_tmp = tcg_temp_new();
1930             d64 = tcg_temp_new_i64();
1931             for (int i = 0; ; ++i) {
1932                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
1933                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1934                 gen_store_fpr_D(dc, rd + 2 * i, d64);
1935                 if (i == 7) {
1936                     break;
1937                 }
1938                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1939                 addr = addr_tmp;
1940             }
1941         } else {
1942             gen_exception(dc, TT_ILL_INSN);
1943         }
1944         break;
1945 
1946     case GET_ASI_SHORT:
1947         /* Valid for lddfa only.  */
1948         if (orig_size == MO_64) {
1949             d64 = tcg_temp_new_i64();
1950             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
1951             gen_store_fpr_D(dc, rd, d64);
1952         } else {
1953             gen_exception(dc, TT_ILL_INSN);
1954         }
1955         break;
1956 
1957     default:
1958         {
1959             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1960             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1961 
1962             save_state(dc);
1963             /* According to the table in the UA2011 manual, the only
1964                other asis that are valid for ldfa/lddfa/ldqfa are
1965                the NO_FAULT asis.  We still need a helper for these,
1966                but we can just use the integer asi helper for them.  */
1967             switch (size) {
1968             case MO_32:
1969                 d64 = tcg_temp_new_i64();
1970                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1971                 d32 = tcg_temp_new_i32();
1972                 tcg_gen_extrl_i64_i32(d32, d64);
1973                 gen_store_fpr_F(dc, rd, d32);
1974                 break;
1975             case MO_64:
1976                 d64 = tcg_temp_new_i64();
1977                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1978                 gen_store_fpr_D(dc, rd, d64);
1979                 break;
1980             case MO_128:
1981                 d64 = tcg_temp_new_i64();
1982                 l64 = tcg_temp_new_i64();
1983                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1984                 addr_tmp = tcg_temp_new();
1985                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1986                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
1987                 gen_store_fpr_D(dc, rd, d64);
1988                 gen_store_fpr_D(dc, rd + 2, l64);
1989                 break;
1990             default:
1991                 g_assert_not_reached();
1992             }
1993         }
1994         break;
1995     }
1996 }
1997 
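/*
 * Floating-point alternate-space store (stfa/stdfa/stqfa), the mirror of
 * gen_ldf_asi: direct stores, 64-byte block stores from eight double
 * registers, and the VIS short stores.
 */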
1998 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1999                         TCGv addr, int rd)
2000 {
2001     MemOp memop = da->memop;
2002     MemOp size = memop & MO_SIZE;
2003     TCGv_i32 d32;
2004     TCGv_i64 d64;
2005     TCGv addr_tmp;
2006 
2007     /* TODO: Use 128-bit load/store below. */
2008     if (size == MO_128) {
2009         memop = (memop & ~MO_SIZE) | MO_64;
2010     }
2011 
2012     switch (da->type) {
2013     case GET_ASI_EXCP:
2014         break;
2015 
2016     case GET_ASI_DIRECT:
2017         memop |= MO_ALIGN_4;
2018         switch (size) {
2019         case MO_32:
2020             d32 = gen_load_fpr_F(dc, rd);
2021             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2022             break;
2023         case MO_64:
2024             d64 = gen_load_fpr_D(dc, rd);
2025             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2026             break;
2027         case MO_128:
2028             /* Only 4-byte alignment is required.  However, it is legal for the
2029                cpu to signal the alignment fault, and the OS trap handler is
2030                required to fix it up.  Requiring 16-byte alignment here avoids
2031                having to probe the second page before performing the first
2032                write.  */
2033             d64 = gen_load_fpr_D(dc, rd);
2034             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2035             addr_tmp = tcg_temp_new();
2036             tcg_gen_addi_tl(addr_tmp, addr, 8);
2037             d64 = gen_load_fpr_D(dc, rd + 2);
2038             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2039             break;
2040         default:
2041             g_assert_not_reached();
2042         }
2043         break;
2044 
2045     case GET_ASI_BLOCK:
2046         /* Valid for stdfa on aligned registers only.  */
2047         if (orig_size == MO_64 && (rd & 7) == 0) {
2048             /* The first operation checks required alignment.  */
2049             addr_tmp = tcg_temp_new();
2050             for (int i = 0; ; ++i) {
2051                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2052                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2053                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2054                 if (i == 7) {
2055                     break;
2056                 }
2057                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2058                 addr = addr_tmp;
2059             }
2060         } else {
2061             gen_exception(dc, TT_ILL_INSN);
2062         }
2063         break;
2064 
2065     case GET_ASI_SHORT:
2066         /* Valid for stdfa only.  */
2067         if (orig_size == MO_64) {
2068             d64 = gen_load_fpr_D(dc, rd);
2069             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2070         } else {
2071             gen_exception(dc, TT_ILL_INSN);
2072         }
2073         break;
2074 
2075     default:
2076         /* According to the table in the UA2011 manual, the only
2077            other asis that are valid for stfa/stdfa/stqfa are
2078            the PST* asis, which aren't currently handled.  */
2079         gen_exception(dc, TT_ILL_INSN);
2080         break;
2081     }
2082 }
2083 
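/*
 * LDDA: load a doubleword into an even/odd GPR pair.  On sparc64 the
 * TWINX (quad-LDD) ASIs instead load 128 bits with a single
 * 16-byte-aligned i128 access, hence the separate DTWINX case.
 */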
2084 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2085 {
2086     TCGv hi = gen_dest_gpr(dc, rd);
2087     TCGv lo = gen_dest_gpr(dc, rd + 1);
2088 
2089     switch (da->type) {
2090     case GET_ASI_EXCP:
2091         return;
2092 
2093     case GET_ASI_DTWINX:
2094 #ifdef TARGET_SPARC64
2095         {
2096             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2097             TCGv_i128 t = tcg_temp_new_i128();
2098 
2099             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2100             /*
2101              * Note that LE twinx acts as if each 64-bit register result is
2102              * byte swapped.  We perform one 128-bit LE load, so must swap
2103              * the order of the writebacks.
2104              */
2105             if ((mop & MO_BSWAP) == MO_TE) {
2106                 tcg_gen_extr_i128_i64(lo, hi, t);
2107             } else {
2108                 tcg_gen_extr_i128_i64(hi, lo, t);
2109             }
2110         }
2111         break;
2112 #else
2113         g_assert_not_reached();
2114 #endif
2115 
2116     case GET_ASI_DIRECT:
2117         {
2118             TCGv_i64 tmp = tcg_temp_new_i64();
2119 
2120             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2121 
2122             /* Note that LE ldda acts as if each 32-bit register
2123                result is byte swapped.  Having just performed one
2124                64-bit bswap, we now need to swap the order of the writebacks.  */
2125             if ((da->memop & MO_BSWAP) == MO_TE) {
2126                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2127             } else {
2128                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2129             }
2130         }
2131         break;
2132 
2133     case GET_ASI_CODE:
2134 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2135         {
2136             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2137             TCGv_i64 tmp = tcg_temp_new_i64();
2138 
2139             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2140 
2141             /* See above.  */
2142             if ((da->memop & MO_BSWAP) == MO_TE) {
2143                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2144             } else {
2145                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2146             }
2147         }
2148         break;
2149 #else
2150         g_assert_not_reached();
2151 #endif
2152 
2153     default:
2154         /* ??? In theory we've handled all of the ASIs that are valid
2155            for ldda, and this should raise DAE_invalid_asi.  However,
2156            real hardware allows others.  This can be seen with e.g.
2157            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2158         {
2159             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2160             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2161             TCGv_i64 tmp = tcg_temp_new_i64();
2162 
2163             save_state(dc);
2164             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2165 
2166             /* See above.  */
2167             if ((da->memop & MO_BSWAP) == MO_TE) {
2168                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2169             } else {
2170                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2171             }
2172         }
2173         break;
2174     }
2175 
2176     gen_store_gpr(dc, rd, hi);
2177     gen_store_gpr(dc, rd + 1, lo);
2178 }
2179 
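/*
 * STDA: store an even/odd GPR pair as one doubleword.  The sparc64
 * DTWINX case writes 128 bits in one access, and GET_ASI_BFILL
 * replicates the pair across an aligned 32-byte block.
 */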
2180 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2181 {
2182     TCGv hi = gen_load_gpr(dc, rd);
2183     TCGv lo = gen_load_gpr(dc, rd + 1);
2184 
2185     switch (da->type) {
2186     case GET_ASI_EXCP:
2187         break;
2188 
2189     case GET_ASI_DTWINX:
2190 #ifdef TARGET_SPARC64
2191         {
2192             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2193             TCGv_i128 t = tcg_temp_new_i128();
2194 
2195             /*
2196              * Note that LE twinx acts as if each 64-bit register result is
2197              * byte swapped.  We perform one 128-bit LE store, so must swap
2198              * the order of the construction.
2199              */
2200             if ((mop & MO_BSWAP) == MO_TE) {
2201                 tcg_gen_concat_i64_i128(t, lo, hi);
2202             } else {
2203                 tcg_gen_concat_i64_i128(t, hi, lo);
2204             }
2205             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2206         }
2207         break;
2208 #else
2209         g_assert_not_reached();
2210 #endif
2211 
2212     case GET_ASI_DIRECT:
2213         {
2214             TCGv_i64 t64 = tcg_temp_new_i64();
2215 
2216             /* Note that LE stda acts as if each 32-bit register result is
2217                byte swapped.  We will perform one 64-bit LE store, so now
2218                we must swap the order of the construction.  */
2219             if ((da->memop & MO_BSWAP) == MO_TE) {
2220                 tcg_gen_concat_tl_i64(t64, lo, hi);
2221             } else {
2222                 tcg_gen_concat_tl_i64(t64, hi, lo);
2223             }
2224             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2225         }
2226         break;
2227 
2228     case GET_ASI_BFILL:
2229         assert(TARGET_LONG_BITS == 32);
2230         /*
2231          * Store 32 bytes of [rd:rd+1] to ADDR.
2232          * See comments for GET_ASI_BCOPY above.
2233          */
2234         {
2235             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2236             TCGv_i64 t8 = tcg_temp_new_i64();
2237             TCGv_i128 t16 = tcg_temp_new_i128();
2238             TCGv daddr = tcg_temp_new();
2239 
2240             tcg_gen_concat_tl_i64(t8, lo, hi);
2241             tcg_gen_concat_i64_i128(t16, t8, t8);
2242             tcg_gen_andi_tl(daddr, addr, -32);
2243             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2244             tcg_gen_addi_tl(daddr, daddr, 16);
2245             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2246         }
2247         break;
2248 
2249     default:
2250         /* ??? In theory we've handled all of the ASIs that are valid
2251            for stda, and this should raise DAE_invalid_asi.  */
2252         {
2253             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2254             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2255             TCGv_i64 t64 = tcg_temp_new_i64();
2256 
2257             /* See above.  */
2258             if ((da->memop & MO_BSWAP) == MO_TE) {
2259                 tcg_gen_concat_tl_i64(t64, lo, hi);
2260             } else {
2261                 tcg_gen_concat_tl_i64(t64, hi, lo);
2262             }
2263 
2264             save_state(dc);
2265             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2266         }
2267         break;
2268     }
2269 }
2270 
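/*
 * FMOVcc: conditional move of an FP register, implemented with a TCG
 * movcond on the already-evaluated DisasCompare; quad moves are split
 * into two 64-bit halves.
 */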
2271 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2272 {
2273 #ifdef TARGET_SPARC64
2274     TCGv_i32 c32, zero, dst, s1, s2;
2275     TCGv_i64 c64 = tcg_temp_new_i64();
2276 
2277     /* We have two choices here: extend the 32 bit data and use movcond_i64,
2278        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2279        the latter.  */
2280     c32 = tcg_temp_new_i32();
2281     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2282     tcg_gen_extrl_i64_i32(c32, c64);
2283 
2284     s1 = gen_load_fpr_F(dc, rs);
2285     s2 = gen_load_fpr_F(dc, rd);
2286     dst = tcg_temp_new_i32();
2287     zero = tcg_constant_i32(0);
2288 
2289     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2290 
2291     gen_store_fpr_F(dc, rd, dst);
2292 #else
2293     qemu_build_not_reached();
2294 #endif
2295 }
2296 
2297 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2298 {
2299 #ifdef TARGET_SPARC64
2300     TCGv_i64 dst = tcg_temp_new_i64();
2301     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2302                         gen_load_fpr_D(dc, rs),
2303                         gen_load_fpr_D(dc, rd));
2304     gen_store_fpr_D(dc, rd, dst);
2305 #else
2306     qemu_build_not_reached();
2307 #endif
2308 }
2309 
2310 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2311 {
2312 #ifdef TARGET_SPARC64
2313     TCGv c2 = tcg_constant_tl(cmp->c2);
2314     TCGv_i64 h = tcg_temp_new_i64();
2315     TCGv_i64 l = tcg_temp_new_i64();
2316 
2317     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2318                         gen_load_fpr_D(dc, rs),
2319                         gen_load_fpr_D(dc, rd));
2320     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2321                         gen_load_fpr_D(dc, rs + 2),
2322                         gen_load_fpr_D(dc, rd + 2));
2323     gen_store_fpr_D(dc, rd, h);
2324     gen_store_fpr_D(dc, rd + 2, l);
2325 #else
2326     qemu_build_not_reached();
2327 #endif
2328 }
2329 
2330 #ifdef TARGET_SPARC64
2331 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2332 {
2333     TCGv_i32 r_tl = tcg_temp_new_i32();
2334 
2335     /* load env->tl into r_tl */
2336     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2337 
2338     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2339     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2340 
2341     /* calculate offset to current trap state from env->ts, reuse r_tl */
2342     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2343     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2344 
2345     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2346     {
2347         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2348         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2349         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2350     }
2351 }
2352 #endif
2353 
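/*
 * V9 doubles the FP register file: for double/quad operands, bit 0 of
 * the 5-bit register field becomes bit 5 of the register number.  On
 * sparc32 that bit is simply masked off.
 */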
2354 static int extract_dfpreg(DisasContext *dc, int x)
2355 {
2356     int r = x & 0x1e;
2357 #ifdef TARGET_SPARC64
2358     r |= (x & 1) << 5;
2359 #endif
2360     return r;
2361 }
2362 
2363 static int extract_qfpreg(DisasContext *dc, int x)
2364 {
2365     int r = x & 0x1c;
2366 #ifdef TARGET_SPARC64
2367     r |= (x & 1) << 5;
2368 #endif
2369     return r;
2370 }
2371 
2372 /* Include the auto-generated decoder.  */
2373 #include "decode-insns.c.inc"
2374 
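/*
 * Glue between the generated decoder and the translation workers: each
 * trans_NAME checks the feature predicate avail_AVAIL for this cpu and,
 * if it holds, calls the shared worker with the decoded arguments.
 */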
2375 #define TRANS(NAME, AVAIL, FUNC, ...) \
2376     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2377     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2378 
2379 #define avail_ALL(C)      true
2380 #ifdef TARGET_SPARC64
2381 # define avail_32(C)      false
2382 # define avail_ASR17(C)   false
2383 # define avail_CASA(C)    true
2384 # define avail_DIV(C)     true
2385 # define avail_MUL(C)     true
2386 # define avail_POWERDOWN(C) false
2387 # define avail_64(C)      true
2388 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2389 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2390 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2391 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2392 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2393 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2394 # define avail_VIS3B(C)   avail_VIS3(C)
2395 #else
2396 # define avail_32(C)      true
2397 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2398 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2399 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2400 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2401 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2402 # define avail_64(C)      false
2403 # define avail_FMAF(C)    false
2404 # define avail_GL(C)      false
2405 # define avail_HYPV(C)    false
2406 # define avail_VIS1(C)    false
2407 # define avail_VIS2(C)    false
2408 # define avail_VIS3(C)    false
2409 # define avail_VIS3B(C)   false
2410 #endif
2411 
2412 /* Default case for non-jump instructions. */
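/*
 * Valid PC values are 4-byte aligned, so the low two bits of dc->npc
 * double as sentinels (DYNAMIC_PC, JUMP_PC, DYNAMIC_PC_LOOKUP);
 * "npc & 3" tests for any of them.
 */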
2413 static bool advance_pc(DisasContext *dc)
2414 {
2415     TCGLabel *l1;
2416 
2417     finishing_insn(dc);
2418 
2419     if (dc->npc & 3) {
2420         switch (dc->npc) {
2421         case DYNAMIC_PC:
2422         case DYNAMIC_PC_LOOKUP:
2423             dc->pc = dc->npc;
2424             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2425             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2426             break;
2427 
2428         case JUMP_PC:
2429             /* we can do a static jump */
2430             l1 = gen_new_label();
2431             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2432 
2433             /* jump not taken */
2434             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2435 
2436             /* jump taken */
2437             gen_set_label(l1);
2438             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2439 
2440             dc->base.is_jmp = DISAS_NORETURN;
2441             break;
2442 
2443         default:
2444             g_assert_not_reached();
2445         }
2446     } else {
2447         dc->pc = dc->npc;
2448         dc->npc = dc->npc + 4;
2449     }
2450     return true;
2451 }
2452 
2453 /*
2454  * Major opcodes 00 and 01 -- branches, call, and sethi
2455  */
2456 
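/*
 * Branch-with-annul semantics: an annulled conditional branch executes
 * its delay slot only when taken, while an annulled always/never branch
 * skips it unconditionally.  Below, the taken path goes through the
 * delay insn at npc and the not-taken path jumps past it to npc + 4.
 */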
2457 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2458                               bool annul, int disp)
2459 {
2460     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2461     target_ulong npc;
2462 
2463     finishing_insn(dc);
2464 
2465     if (cmp->cond == TCG_COND_ALWAYS) {
2466         if (annul) {
2467             dc->pc = dest;
2468             dc->npc = dest + 4;
2469         } else {
2470             gen_mov_pc_npc(dc);
2471             dc->npc = dest;
2472         }
2473         return true;
2474     }
2475 
2476     if (cmp->cond == TCG_COND_NEVER) {
2477         npc = dc->npc;
2478         if (npc & 3) {
2479             gen_mov_pc_npc(dc);
2480             if (annul) {
2481                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2482             }
2483             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2484         } else {
2485             dc->pc = npc + (annul ? 4 : 0);
2486             dc->npc = dc->pc + 4;
2487         }
2488         return true;
2489     }
2490 
2491     flush_cond(dc);
2492     npc = dc->npc;
2493 
2494     if (annul) {
2495         TCGLabel *l1 = gen_new_label();
2496 
2497         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2498         gen_goto_tb(dc, 0, npc, dest);
2499         gen_set_label(l1);
2500         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2501 
2502         dc->base.is_jmp = DISAS_NORETURN;
2503     } else {
2504         if (npc & 3) {
2505             switch (npc) {
2506             case DYNAMIC_PC:
2507             case DYNAMIC_PC_LOOKUP:
2508                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2509                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2510                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2511                                    cmp->c1, tcg_constant_tl(cmp->c2),
2512                                    tcg_constant_tl(dest), cpu_npc);
2513                 dc->pc = npc;
2514                 break;
2515             default:
2516                 g_assert_not_reached();
2517             }
2518         } else {
2519             dc->pc = npc;
2520             dc->npc = JUMP_PC;
2521             dc->jump = *cmp;
2522             dc->jump_pc[0] = dest;
2523             dc->jump_pc[1] = npc + 4;
2524 
2525             /* The condition for cpu_cond is always NE -- normalize. */
2526             if (cmp->cond == TCG_COND_NE) {
2527                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2528             } else {
2529                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2530             }
2531             dc->cpu_cond_live = true;
2532         }
2533     }
2534     return true;
2535 }
2536 
2537 static bool raise_priv(DisasContext *dc)
2538 {
2539     gen_exception(dc, TT_PRIV_INSN);
2540     return true;
2541 }
2542 
2543 static bool raise_unimpfpop(DisasContext *dc)
2544 {
2545     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2546     return true;
2547 }
2548 
2549 static bool gen_trap_float128(DisasContext *dc)
2550 {
2551     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2552         return false;
2553     }
2554     return raise_unimpfpop(dc);
2555 }
2556 
2557 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2558 {
2559     DisasCompare cmp;
2560 
2561     gen_compare(&cmp, a->cc, a->cond, dc);
2562     return advance_jump_cond(dc, &cmp, a->a, a->i);
2563 }
2564 
2565 TRANS(Bicc, ALL, do_bpcc, a)
2566 TRANS(BPcc,  64, do_bpcc, a)
2567 
2568 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2569 {
2570     DisasCompare cmp;
2571 
2572     if (gen_trap_ifnofpu(dc)) {
2573         return true;
2574     }
2575     gen_fcompare(&cmp, a->cc, a->cond);
2576     return advance_jump_cond(dc, &cmp, a->a, a->i);
2577 }
2578 
2579 TRANS(FBPfcc,  64, do_fbpfcc, a)
2580 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2581 
2582 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2583 {
2584     DisasCompare cmp;
2585 
2586     if (!avail_64(dc)) {
2587         return false;
2588     }
2589     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2590         return false;
2591     }
2592     return advance_jump_cond(dc, &cmp, a->a, a->i);
2593 }
2594 
2595 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2596 {
2597     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2598 
2599     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2600     gen_mov_pc_npc(dc);
2601     dc->npc = target;
2602     return true;
2603 }
2604 
2605 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2606 {
2607     /*
2608      * For sparc32, always generate the no-coprocessor exception.
2609      * For sparc64, always generate an illegal instruction exception.
2610      */
2611 #ifdef TARGET_SPARC64
2612     return false;
2613 #else
2614     gen_exception(dc, TT_NCP_INSN);
2615     return true;
2616 #endif
2617 }
2618 
2619 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2620 {
2621     /* Special-case %g0 because that's the canonical nop.  */
2622     if (a->rd) {
2623         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2624     }
2625     return advance_pc(dc);
2626 }
2627 
2628 /*
2629  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2630  */
2631 
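/*
 * Tcc: conditional software trap.  The trap type is TT_TRAP plus the
 * masked low bits of rs1 + rs2/imm; hypervisor-capable CPUs in
 * supervisor mode use the wider UA2005 mask for those low bits.
 */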
2632 static bool do_tcc(DisasContext *dc, int cond, int cc,
2633                    int rs1, bool imm, int rs2_or_imm)
2634 {
2635     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2636                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2637     DisasCompare cmp;
2638     TCGLabel *lab;
2639     TCGv_i32 trap;
2640 
2641     /* Trap never.  */
2642     if (cond == 0) {
2643         return advance_pc(dc);
2644     }
2645 
2646     /*
2647      * Immediate traps are the most common case.  Since this value is
2648      * live across the branch, it really pays to evaluate the constant.
2649      */
2650     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2651         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2652     } else {
2653         trap = tcg_temp_new_i32();
2654         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2655         if (imm) {
2656             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2657         } else {
2658             TCGv_i32 t2 = tcg_temp_new_i32();
2659             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2660             tcg_gen_add_i32(trap, trap, t2);
2661         }
2662         tcg_gen_andi_i32(trap, trap, mask);
2663         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2664     }
2665 
2666     finishing_insn(dc);
2667 
2668     /* Trap always.  */
2669     if (cond == 8) {
2670         save_state(dc);
2671         gen_helper_raise_exception(tcg_env, trap);
2672         dc->base.is_jmp = DISAS_NORETURN;
2673         return true;
2674     }
2675 
2676     /* Conditional trap.  */
2677     flush_cond(dc);
2678     lab = delay_exceptionv(dc, trap);
2679     gen_compare(&cmp, cc, cond, dc);
2680     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2681 
2682     return advance_pc(dc);
2683 }
2684 
2685 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2686 {
2687     if (avail_32(dc) && a->cc) {
2688         return false;
2689     }
2690     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2691 }
2692 
2693 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2694 {
2695     if (avail_64(dc)) {
2696         return false;
2697     }
2698     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2699 }
2700 
2701 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2702 {
2703     if (avail_32(dc)) {
2704         return false;
2705     }
2706     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2707 }
2708 
2709 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2710 {
2711     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2712     return advance_pc(dc);
2713 }
2714 
2715 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2716 {
2717     if (avail_32(dc)) {
2718         return false;
2719     }
2720     if (a->mmask) {
2721         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2722         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2723     }
2724     if (a->cmask) {
2725         /* For #Sync, etc, end the TB to recognize interrupts. */
2726         dc->base.is_jmp = DISAS_EXIT;
2727     }
2728     return advance_pc(dc);
2729 }
2730 
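/*
 * Common pattern for the RD* insns: FUNC either fills in the destination
 * temp passed to it and returns it, or returns a live global (e.g.
 * cpu_y), and the result is then copied into the destination register.
 */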
2731 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2732                           TCGv (*func)(DisasContext *, TCGv))
2733 {
2734     if (!priv) {
2735         return raise_priv(dc);
2736     }
2737     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2738     return advance_pc(dc);
2739 }
2740 
2741 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2742 {
2743     return cpu_y;
2744 }
2745 
2746 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2747 {
2748     /*
2749      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2750      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2751      * This pattern matches after all other ASRs, so the Leon3 %asr17 read is handled first.
2752      */
2753     if (avail_64(dc) && a->rs1 != 0) {
2754         return false;
2755     }
2756     return do_rd_special(dc, true, a->rd, do_rdy);
2757 }
2758 
2759 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2760 {
2761     gen_helper_rdasr17(dst, tcg_env);
2762     return dst;
2763 }
2764 
2765 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2766 
2767 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2768 {
2769     gen_helper_rdccr(dst, tcg_env);
2770     return dst;
2771 }
2772 
2773 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2774 
2775 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2776 {
2777 #ifdef TARGET_SPARC64
2778     return tcg_constant_tl(dc->asi);
2779 #else
2780     qemu_build_not_reached();
2781 #endif
2782 }
2783 
2784 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2785 
2786 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2787 {
2788     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2789 
2790     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2791     if (translator_io_start(&dc->base)) {
2792         dc->base.is_jmp = DISAS_EXIT;
2793     }
2794     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2795                               tcg_constant_i32(dc->mem_idx));
2796     return dst;
2797 }
2798 
2799 /* TODO: non-priv access only allowed when enabled. */
2800 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2801 
2802 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2803 {
2804     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2805 }
2806 
2807 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2808 
2809 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2810 {
2811     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2812     return dst;
2813 }
2814 
2815 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2816 
2817 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2818 {
2819     gen_trap_ifnofpu(dc);
2820     return cpu_gsr;
2821 }
2822 
2823 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2824 
2825 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2826 {
2827     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2828     return dst;
2829 }
2830 
2831 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2832 
2833 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2834 {
2835     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2836     return dst;
2837 }
2838 
2839 /* TODO: non-priv access only allowed when enabled. */
2840 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2841 
2842 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2843 {
2844     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2845 
2846     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2847     if (translator_io_start(&dc->base)) {
2848         dc->base.is_jmp = DISAS_EXIT;
2849     }
2850     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2851                               tcg_constant_i32(dc->mem_idx));
2852     return dst;
2853 }
2854 
2855 /* TODO: non-priv access only allowed when enabled. */
2856 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2857 
2858 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2859 {
2860     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2861     return dst;
2862 }
2863 
2864 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2865 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2866 
2867 /*
2868  * UltraSPARC-T1 Strand status.
2869  * The HYPV check may not be enough: UA2005 & UA2007 describe
2870  * this ASR as implementation dependent.
2871  */
2872 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2873 {
2874     return tcg_constant_tl(1);
2875 }
2876 
2877 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2878 
2879 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2880 {
2881     gen_helper_rdpsr(dst, tcg_env);
2882     return dst;
2883 }
2884 
2885 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2886 
2887 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2888 {
2889     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2890     return dst;
2891 }
2892 
2893 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2894 
2895 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2896 {
2897     TCGv_i32 tl = tcg_temp_new_i32();
2898     TCGv_ptr tp = tcg_temp_new_ptr();
2899 
2900     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2901     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2902     tcg_gen_shli_i32(tl, tl, 3);
2903     tcg_gen_ext_i32_ptr(tp, tl);
2904     tcg_gen_add_ptr(tp, tp, tcg_env);
2905 
2906     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2907     return dst;
2908 }
2909 
2910 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2911 
2912 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2913 {
2914     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2915     return dst;
2916 }
2917 
2918 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2919 
2920 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2921 {
2922     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2923     return dst;
2924 }
2925 
2926 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2927 
2928 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2929 {
2930     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2931     return dst;
2932 }
2933 
2934 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2935 
2936 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2937 {
2938     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2939     return dst;
2940 }
2941 
2942 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2943       do_rdhstick_cmpr)
2944 
2945 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2946 {
2947     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2948     return dst;
2949 }
2950 
2951 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2952 
2953 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2954 {
2955 #ifdef TARGET_SPARC64
2956     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2957 
2958     gen_load_trap_state_at_tl(r_tsptr);
2959     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2960     return dst;
2961 #else
2962     qemu_build_not_reached();
2963 #endif
2964 }
2965 
2966 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2967 
2968 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2969 {
2970 #ifdef TARGET_SPARC64
2971     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2972 
2973     gen_load_trap_state_at_tl(r_tsptr);
2974     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2975     return dst;
2976 #else
2977     qemu_build_not_reached();
2978 #endif
2979 }
2980 
2981 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2982 
2983 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2984 {
2985 #ifdef TARGET_SPARC64
2986     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2987 
2988     gen_load_trap_state_at_tl(r_tsptr);
2989     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2990     return dst;
2991 #else
2992     qemu_build_not_reached();
2993 #endif
2994 }
2995 
2996 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2997 
2998 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2999 {
3000 #ifdef TARGET_SPARC64
3001     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3002 
3003     gen_load_trap_state_at_tl(r_tsptr);
3004     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3005     return dst;
3006 #else
3007     qemu_build_not_reached();
3008 #endif
3009 }
3010 
3011 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3012 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3013 
3014 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3015 {
3016     return cpu_tbr;
3017 }
3018 
3019 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3020 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3021 
3022 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3023 {
3024     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3025     return dst;
3026 }
3027 
3028 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3029 
3030 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3031 {
3032     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3033     return dst;
3034 }
3035 
3036 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3037 
3038 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3039 {
3040     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3041     return dst;
3042 }
3043 
3044 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3045 
3046 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3047 {
3048     gen_helper_rdcwp(dst, tcg_env);
3049     return dst;
3050 }
3051 
3052 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3053 
3054 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3055 {
3056     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3057     return dst;
3058 }
3059 
3060 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3061 
3062 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3063 {
3064     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3065     return dst;
3066 }
3067 
3068 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3069       do_rdcanrestore)
3070 
3071 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3072 {
3073     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3074     return dst;
3075 }
3076 
3077 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3078 
3079 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3080 {
3081     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3082     return dst;
3083 }
3084 
3085 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3086 
3087 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3088 {
3089     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3090     return dst;
3091 }
3092 
3093 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3094 
3095 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3096 {
3097     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3098     return dst;
3099 }
3100 
3101 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3102 
3103 /* UA2005 strand status */
3104 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3105 {
3106     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3107     return dst;
3108 }
3109 
3110 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3111 
3112 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3113 {
3114     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3115     return dst;
3116 }
3117 
3118 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3119 
3120 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3121 {
3122     if (avail_64(dc)) {
3123         gen_helper_flushw(tcg_env);
3124         return advance_pc(dc);
3125     }
3126     return false;
3127 }
3128 
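/*
 * Common pattern for the WR* insns.  Per the SPARC spec these write
 * r[rs1] XOR (r[rs2] or simm13) to the target register, hence the xor
 * below rather than an add.
 */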
3129 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3130                           void (*func)(DisasContext *, TCGv))
3131 {
3132     TCGv src;
3133 
3134     /* For simplicity, we under-decoded the rs2 form. */
3135     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3136         return false;
3137     }
3138     if (!priv) {
3139         return raise_priv(dc);
3140     }
3141 
3142     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3143         src = tcg_constant_tl(a->rs2_or_imm);
3144     } else {
3145         TCGv src1 = gen_load_gpr(dc, a->rs1);
3146         if (a->rs2_or_imm == 0) {
3147             src = src1;
3148         } else {
3149             src = tcg_temp_new();
3150             if (a->imm) {
3151                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3152             } else {
3153                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3154             }
3155         }
3156     }
3157     func(dc, src);
3158     return advance_pc(dc);
3159 }
3160 
3161 static void do_wry(DisasContext *dc, TCGv src)
3162 {
3163     tcg_gen_ext32u_tl(cpu_y, src);
3164 }
3165 
3166 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3167 
3168 static void do_wrccr(DisasContext *dc, TCGv src)
3169 {
3170     gen_helper_wrccr(tcg_env, src);
3171 }
3172 
3173 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3174 
3175 static void do_wrasi(DisasContext *dc, TCGv src)
3176 {
3177     TCGv tmp = tcg_temp_new();
3178 
3179     tcg_gen_ext8u_tl(tmp, src);
3180     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3181     /* End TB to notice changed ASI. */
3182     dc->base.is_jmp = DISAS_EXIT;
3183 }
3184 
3185 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3186 
3187 static void do_wrfprs(DisasContext *dc, TCGv src)
3188 {
3189 #ifdef TARGET_SPARC64
3190     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3191     dc->fprs_dirty = 0;
3192     dc->base.is_jmp = DISAS_EXIT;
3193 #else
3194     qemu_build_not_reached();
3195 #endif
3196 }
3197 
3198 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3199 
3200 static void do_wrgsr(DisasContext *dc, TCGv src)
3201 {
3202     gen_trap_ifnofpu(dc);
3203     tcg_gen_mov_tl(cpu_gsr, src);
3204 }
3205 
3206 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3207 
3208 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3209 {
3210     gen_helper_set_softint(tcg_env, src);
3211 }
3212 
3213 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3214 
3215 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3216 {
3217     gen_helper_clear_softint(tcg_env, src);
3218 }
3219 
3220 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3221 
3222 static void do_wrsoftint(DisasContext *dc, TCGv src)
3223 {
3224     gen_helper_write_softint(tcg_env, src);
3225 }
3226 
3227 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3228 
3229 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3230 {
3231     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3232 
3233     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3234     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3235     translator_io_start(&dc->base);
3236     gen_helper_tick_set_limit(r_tickptr, src);
3237     /* End TB to handle timer interrupt */
3238     dc->base.is_jmp = DISAS_EXIT;
3239 }
3240 
3241 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3242 
3243 static void do_wrstick(DisasContext *dc, TCGv src)
3244 {
3245 #ifdef TARGET_SPARC64
3246     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3247 
3248     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3249     translator_io_start(&dc->base);
3250     gen_helper_tick_set_count(r_tickptr, src);
3251     /* End TB to handle timer interrupt */
3252     dc->base.is_jmp = DISAS_EXIT;
3253 #else
3254     qemu_build_not_reached();
3255 #endif
3256 }
3257 
3258 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3259 
3260 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3261 {
3262     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3263 
3264     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3265     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3266     translator_io_start(&dc->base);
3267     gen_helper_tick_set_limit(r_tickptr, src);
3268     /* End TB to handle timer interrupt */
3269     dc->base.is_jmp = DISAS_EXIT;
3270 }
3271 
3272 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3273 
3274 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3275 {
3276     finishing_insn(dc);
3277     save_state(dc);
3278     gen_helper_power_down(tcg_env);
3279 }
3280 
3281 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3282 
3283 static void do_wrpsr(DisasContext *dc, TCGv src)
3284 {
3285     gen_helper_wrpsr(tcg_env, src);
3286     dc->base.is_jmp = DISAS_EXIT;
3287 }
3288 
3289 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3290 
3291 static void do_wrwim(DisasContext *dc, TCGv src)
3292 {
3293     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3294     TCGv tmp = tcg_temp_new();
3295 
3296     tcg_gen_andi_tl(tmp, src, mask);
3297     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3298 }
3299 
3300 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3301 
3302 static void do_wrtpc(DisasContext *dc, TCGv src)
3303 {
3304 #ifdef TARGET_SPARC64
3305     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3306 
3307     gen_load_trap_state_at_tl(r_tsptr);
3308     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3309 #else
3310     qemu_build_not_reached();
3311 #endif
3312 }
3313 
3314 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3315 
3316 static void do_wrtnpc(DisasContext *dc, TCGv src)
3317 {
3318 #ifdef TARGET_SPARC64
3319     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3320 
3321     gen_load_trap_state_at_tl(r_tsptr);
3322     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3323 #else
3324     qemu_build_not_reached();
3325 #endif
3326 }
3327 
3328 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3329 
3330 static void do_wrtstate(DisasContext *dc, TCGv src)
3331 {
3332 #ifdef TARGET_SPARC64
3333     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3334 
3335     gen_load_trap_state_at_tl(r_tsptr);
3336     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3337 #else
3338     qemu_build_not_reached();
3339 #endif
3340 }
3341 
3342 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3343 
3344 static void do_wrtt(DisasContext *dc, TCGv src)
3345 {
3346 #ifdef TARGET_SPARC64
3347     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3348 
3349     gen_load_trap_state_at_tl(r_tsptr);
3350     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3351 #else
3352     qemu_build_not_reached();
3353 #endif
3354 }
3355 
3356 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3357 
3358 static void do_wrtick(DisasContext *dc, TCGv src)
3359 {
3360     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3361 
3362     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3363     translator_io_start(&dc->base);
3364     gen_helper_tick_set_count(r_tickptr, src);
3365     /* End TB to handle timer interrupt */
3366     dc->base.is_jmp = DISAS_EXIT;
3367 }
3368 
3369 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3370 
3371 static void do_wrtba(DisasContext *dc, TCGv src)
3372 {
3373     tcg_gen_mov_tl(cpu_tbr, src);
3374 }
3375 
3376 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3377 
3378 static void do_wrpstate(DisasContext *dc, TCGv src)
3379 {
3380     save_state(dc);
3381     if (translator_io_start(&dc->base)) {
3382         dc->base.is_jmp = DISAS_EXIT;
3383     }
3384     gen_helper_wrpstate(tcg_env, src);
3385     dc->npc = DYNAMIC_PC;
3386 }
3387 
3388 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3389 
3390 static void do_wrtl(DisasContext *dc, TCGv src)
3391 {
3392     save_state(dc);
3393     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3394     dc->npc = DYNAMIC_PC;
3395 }
3396 
3397 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3398 
3399 static void do_wrpil(DisasContext *dc, TCGv src)
3400 {
3401     if (translator_io_start(&dc->base)) {
3402         dc->base.is_jmp = DISAS_EXIT;
3403     }
3404     gen_helper_wrpil(tcg_env, src);
3405 }
3406 
3407 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3408 
3409 static void do_wrcwp(DisasContext *dc, TCGv src)
3410 {
3411     gen_helper_wrcwp(tcg_env, src);
3412 }
3413 
3414 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3415 
3416 static void do_wrcansave(DisasContext *dc, TCGv src)
3417 {
3418     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3419 }
3420 
3421 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3422 
3423 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3424 {
3425     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3426 }
3427 
3428 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3429 
3430 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3431 {
3432     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3433 }
3434 
3435 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3436 
3437 static void do_wrotherwin(DisasContext *dc, TCGv src)
3438 {
3439     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3440 }
3441 
3442 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3443 
3444 static void do_wrwstate(DisasContext *dc, TCGv src)
3445 {
3446     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3447 }
3448 
3449 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3450 
3451 static void do_wrgl(DisasContext *dc, TCGv src)
3452 {
3453     gen_helper_wrgl(tcg_env, src);
3454 }
3455 
3456 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3457 
3458 /* UA2005 strand status */
3459 static void do_wrssr(DisasContext *dc, TCGv src)
3460 {
3461     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3462 }
3463 
3464 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3465 
3466 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3467 
3468 static void do_wrhpstate(DisasContext *dc, TCGv src)
3469 {
3470     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3471     dc->base.is_jmp = DISAS_EXIT;
3472 }
3473 
3474 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3475 
3476 static void do_wrhtstate(DisasContext *dc, TCGv src)
3477 {
3478     TCGv_i32 tl = tcg_temp_new_i32();
3479     TCGv_ptr tp = tcg_temp_new_ptr();
3480 
3481     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3482     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3483     tcg_gen_shli_i32(tl, tl, 3);
3484     tcg_gen_ext_i32_ptr(tp, tl);
3485     tcg_gen_add_ptr(tp, tp, tcg_env);
3486 
3487     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3488 }
3489 
3490 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3491 
3492 static void do_wrhintp(DisasContext *dc, TCGv src)
3493 {
3494     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3495 }
3496 
3497 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3498 
3499 static void do_wrhtba(DisasContext *dc, TCGv src)
3500 {
3501     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3502 }
3503 
3504 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3505 
3506 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3507 {
3508     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3509 
3510     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3511     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3512     translator_io_start(&dc->base);
3513     gen_helper_tick_set_limit(r_tickptr, src);
3514     /* End TB to handle timer interrupt */
3515     dc->base.is_jmp = DISAS_EXIT;
3516 }
3517 
3518 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3519       do_wrhstick_cmpr)
3520 
3521 static bool do_saved_restored(DisasContext *dc, bool saved)
3522 {
3523     if (!supervisor(dc)) {
3524         return raise_priv(dc);
3525     }
3526     if (saved) {
3527         gen_helper_saved(tcg_env);
3528     } else {
3529         gen_helper_restored(tcg_env);
3530     }
3531     return advance_pc(dc);
3532 }
3533 
3534 TRANS(SAVED, 64, do_saved_restored, true)
3535 TRANS(RESTORED, 64, do_saved_restored, false)
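
/*
 * SAVED and RESTORED only adjust the register-window bookkeeping:
 * roughly, SAVED increments CANSAVE and RESTORED increments CANRESTORE,
 * with OTHERWIN (or the opposite counter) decremented so that
 * CANSAVE + CANRESTORE + OTHERWIN stays equal to NWINDOWS - 2; the
 * details live in the saved/restored helpers.
 */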
3536 
3537 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3538 {
3539     return advance_pc(dc);
3540 }
3541 
3542 /*
3543  * TODO: Need a feature bit for sparcv8.
3544  * In the meantime, treat all 32-bit cpus like sparcv7.
3545  */
3546 TRANS(NOP_v7, 32, trans_NOP, a)
3547 TRANS(NOP_v9, 64, trans_NOP, a)
3548 
3549 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3550                          void (*func)(TCGv, TCGv, TCGv),
3551                          void (*funci)(TCGv, TCGv, target_long),
3552                          bool logic_cc)
3553 {
3554     TCGv dst, src1;
3555 
3556     /* For simplicity, we under-decoded the rs2 form. */
3557     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3558         return false;
3559     }
3560 
3561     if (logic_cc) {
3562         dst = cpu_cc_N;
3563     } else {
3564         dst = gen_dest_gpr(dc, a->rd);
3565     }
3566     src1 = gen_load_gpr(dc, a->rs1);
3567 
3568     if (a->imm || a->rs2_or_imm == 0) {
3569         if (funci) {
3570             funci(dst, src1, a->rs2_or_imm);
3571         } else {
3572             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3573         }
3574     } else {
3575         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3576     }
3577 
3578     if (logic_cc) {
3579         if (TARGET_LONG_BITS == 64) {
3580             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3581             tcg_gen_movi_tl(cpu_icc_C, 0);
3582         }
3583         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3584         tcg_gen_movi_tl(cpu_cc_C, 0);
3585         tcg_gen_movi_tl(cpu_cc_V, 0);
3586     }
3587 
3588     gen_store_gpr(dc, a->rd, dst);
3589     return advance_pc(dc);
3590 }
3591 
3592 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3593                      void (*func)(TCGv, TCGv, TCGv),
3594                      void (*funci)(TCGv, TCGv, target_long),
3595                      void (*func_cc)(TCGv, TCGv, TCGv))
3596 {
3597     if (a->cc) {
3598         return do_arith_int(dc, a, func_cc, NULL, false);
3599     }
3600     return do_arith_int(dc, a, func, funci, false);
3601 }
3602 
3603 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3604                      void (*func)(TCGv, TCGv, TCGv),
3605                      void (*funci)(TCGv, TCGv, target_long))
3606 {
3607     return do_arith_int(dc, a, func, funci, a->cc);
3608 }
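
/*
 * A worked example of the logic_cc path above: for "andcc %g1, %g2, %g3"
 * the AND result is computed directly into cpu_cc_N, mirrored into the
 * Z fields (including the 32-bit icc copies on sparc64), and C and V
 * are zeroed.  With N holding the full result, the sign and zero tests
 * fall out for free and no separate flags computation is needed.
 */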
3609 
3610 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3611 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3612 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3613 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3614 
3615 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3616 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3617 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3618 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3619 
3620 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3621 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3622 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3623 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3624 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3625 
3626 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3627 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3628 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3629 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3630 
3631 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3632 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3633 
3634 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3635 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3636 
3637 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3638 {
3639     /* OR with %g0 is the canonical alias for MOV. */
3640     if (!a->cc && a->rs1 == 0) {
3641         if (a->imm || a->rs2_or_imm == 0) {
3642             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3643         } else if (a->rs2_or_imm & ~0x1f) {
3644             /* For simplicity, we under-decoded the rs2 form. */
3645             return false;
3646         } else {
3647             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3648         }
3649         return advance_pc(dc);
3650     }
3651     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3652 }
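
/*
 * E.g. "mov %o1, %o0" assembles as "or %g0, %o1, %o0" and takes the
 * fast path above, storing %o1 straight to %o0; "mov 5, %o0" is
 * "or %g0, 5, %o0" and stores the simm13 constant directly.
 */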
3653 
3654 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3655 {
3656     TCGv_i64 t1, t2;
3657     TCGv dst;
3658 
3659     if (!avail_DIV(dc)) {
3660         return false;
3661     }
3662     /* For simplicity, we under-decoded the rs2 form. */
3663     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3664         return false;
3665     }
3666 
3667     if (unlikely(a->rs2_or_imm == 0)) {
3668         gen_exception(dc, TT_DIV_ZERO);
3669         return true;
3670     }
3671 
3672     if (a->imm) {
3673         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3674     } else {
3675         TCGLabel *lab;
3676         TCGv_i32 n2;
3677 
3678         finishing_insn(dc);
3679         flush_cond(dc);
3680 
3681         n2 = tcg_temp_new_i32();
3682         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3683 
3684         lab = delay_exception(dc, TT_DIV_ZERO);
3685         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3686 
3687         t2 = tcg_temp_new_i64();
3688 #ifdef TARGET_SPARC64
3689         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3690 #else
3691         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3692 #endif
3693     }
3694 
3695     t1 = tcg_temp_new_i64();
3696     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3697 
3698     tcg_gen_divu_i64(t1, t1, t2);
3699     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3700 
3701     dst = gen_dest_gpr(dc, a->rd);
3702     tcg_gen_trunc_i64_tl(dst, t1);
3703     gen_store_gpr(dc, a->rd, dst);
3704     return advance_pc(dc);
3705 }
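
/*
 * UDIV divides the 64-bit quantity (Y:rs1) by the low 32 bits of rs2:
 * e.g. with %y = 0, %o0 = 100 and %o1 = 7 the quotient 14 is written
 * back; a quotient that does not fit in 32 bits saturates to
 * 0xffffffff via the umin above.  The cc variant goes through
 * gen_op_udivcc instead, which also computes the overflow flag.
 */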
3706 
3707 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3708 {
3709     TCGv dst, src1, src2;
3710 
3711     if (!avail_64(dc)) {
3712         return false;
3713     }
3714     /* For simplicity, we under-decoded the rs2 form. */
3715     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3716         return false;
3717     }
3718 
3719     if (unlikely(a->rs2_or_imm == 0)) {
3720         gen_exception(dc, TT_DIV_ZERO);
3721         return true;
3722     }
3723 
3724     if (a->imm) {
3725         src2 = tcg_constant_tl(a->rs2_or_imm);
3726     } else {
3727         TCGLabel *lab;
3728 
3729         finishing_insn(dc);
3730         flush_cond(dc);
3731 
3732         lab = delay_exception(dc, TT_DIV_ZERO);
3733         src2 = cpu_regs[a->rs2_or_imm];
3734         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3735     }
3736 
3737     dst = gen_dest_gpr(dc, a->rd);
3738     src1 = gen_load_gpr(dc, a->rs1);
3739 
3740     tcg_gen_divu_tl(dst, src1, src2);
3741     gen_store_gpr(dc, a->rd, dst);
3742     return advance_pc(dc);
3743 }
3744 
3745 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3746 {
3747     TCGv dst, src1, src2;
3748 
3749     if (!avail_64(dc)) {
3750         return false;
3751     }
3752     /* For simplicity, we under-decoded the rs2 form. */
3753     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3754         return false;
3755     }
3756 
3757     if (unlikely(a->rs2_or_imm == 0)) {
3758         gen_exception(dc, TT_DIV_ZERO);
3759         return true;
3760     }
3761 
3762     dst = gen_dest_gpr(dc, a->rd);
3763     src1 = gen_load_gpr(dc, a->rs1);
3764 
3765     if (a->imm) {
3766         if (unlikely(a->rs2_or_imm == -1)) {
3767             tcg_gen_neg_tl(dst, src1);
3768             gen_store_gpr(dc, a->rd, dst);
3769             return advance_pc(dc);
3770         }
3771         src2 = tcg_constant_tl(a->rs2_or_imm);
3772     } else {
3773         TCGLabel *lab;
3774         TCGv t1, t2;
3775 
3776         finishing_insn(dc);
3777         flush_cond(dc);
3778 
3779         lab = delay_exception(dc, TT_DIV_ZERO);
3780         src2 = cpu_regs[a->rs2_or_imm];
3781         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3782 
3783         /*
3784          * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3785          * Set SRC2 to 1 as a new divisor, to produce the correct result.
3786          */
3787         t1 = tcg_temp_new();
3788         t2 = tcg_temp_new();
3789         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3790         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3791         tcg_gen_and_tl(t1, t1, t2);
3792         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3793                            tcg_constant_tl(1), src2);
3794         src2 = t1;
3795     }
3796 
3797     tcg_gen_div_tl(dst, src1, src2);
3798     gen_store_gpr(dc, a->rd, dst);
3799     return advance_pc(dc);
3800 }
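
/*
 * Example of the guard above: with %o0 = INT64_MIN and %o1 = -1, both
 * setconds yield 1, so the movcond swaps in 1 as the divisor and the
 * division produces INT64_MIN (the architectural wrap-around result)
 * without ever issuing INT64_MIN / -1 on the host.
 */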
3801 
3802 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3803                      int width, bool cc, bool little_endian)
3804 {
3805     TCGv dst, s1, s2, l, r, t, m;
3806     uint64_t amask = address_mask_i(dc, -8);
3807 
3808     dst = gen_dest_gpr(dc, a->rd);
3809     s1 = gen_load_gpr(dc, a->rs1);
3810     s2 = gen_load_gpr(dc, a->rs2);
3811 
3812     if (cc) {
3813         gen_op_subcc(cpu_cc_N, s1, s2);
3814     }
3815 
3816     l = tcg_temp_new();
3817     r = tcg_temp_new();
3818     t = tcg_temp_new();
3819 
3820     switch (width) {
3821     case 8:
3822         tcg_gen_andi_tl(l, s1, 7);
3823         tcg_gen_andi_tl(r, s2, 7);
3824         tcg_gen_xori_tl(r, r, 7);
3825         m = tcg_constant_tl(0xff);
3826         break;
3827     case 16:
3828         tcg_gen_extract_tl(l, s1, 1, 2);
3829         tcg_gen_extract_tl(r, s2, 1, 2);
3830         tcg_gen_xori_tl(r, r, 3);
3831         m = tcg_constant_tl(0xf);
3832         break;
3833     case 32:
3834         tcg_gen_extract_tl(l, s1, 2, 1);
3835         tcg_gen_extract_tl(r, s2, 2, 1);
3836         tcg_gen_xori_tl(r, r, 1);
3837         m = tcg_constant_tl(0x3);
3838         break;
3839     default:
3840         g_assert_not_reached();
3841     }
3842 
3843     /* Compute Left Edge */
3844     if (little_endian) {
3845         tcg_gen_shl_tl(l, m, l);
3846         tcg_gen_and_tl(l, l, m);
3847     } else {
3848         tcg_gen_shr_tl(l, m, l);
3849     }
3850     /* Compute Right Edge */
3851     if (little_endian) {
3852         tcg_gen_shr_tl(r, m, r);
3853     } else {
3854         tcg_gen_shl_tl(r, m, r);
3855         tcg_gen_and_tl(r, r, m);
3856     }
3857 
3858     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3859     tcg_gen_xor_tl(t, s1, s2);
3860     tcg_gen_and_tl(r, r, l);
3861     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3862 
3863     gen_store_gpr(dc, a->rd, dst);
3864     return advance_pc(dc);
3865 }
3866 
3867 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3868 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3869 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3870 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3871 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3872 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3873 
3874 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3875 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3876 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3877 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3878 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3879 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
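
/*
 * Worked example for big-endian EDGE8 (width 8, !little_endian):
 * s1 = 0x1001, s2 = 0x1006 gives l = 0xff >> 1 = 0x7f and
 * r = (0xff << (6 ^ 7)) & 0xff = 0xfe.  Both addresses lie in the same
 * doubleword, so dst = l & r = 0x7e, a byte mask covering bytes 1..6,
 * which is exactly the partial-store mask the VIS edge instructions
 * are defined to produce.
 */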
3880 
3881 static bool do_rr(DisasContext *dc, arg_r_r *a,
3882                   void (*func)(TCGv, TCGv))
3883 {
3884     TCGv dst = gen_dest_gpr(dc, a->rd);
3885     TCGv src = gen_load_gpr(dc, a->rs);
3886 
3887     func(dst, src);
3888     gen_store_gpr(dc, a->rd, dst);
3889     return advance_pc(dc);
3890 }
3891 
3892 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3893 
3894 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3895                    void (*func)(TCGv, TCGv, TCGv))
3896 {
3897     TCGv dst = gen_dest_gpr(dc, a->rd);
3898     TCGv src1 = gen_load_gpr(dc, a->rs1);
3899     TCGv src2 = gen_load_gpr(dc, a->rs2);
3900 
3901     func(dst, src1, src2);
3902     gen_store_gpr(dc, a->rd, dst);
3903     return advance_pc(dc);
3904 }
3905 
3906 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3907 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3908 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3909 
3910 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3911 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3912 
3913 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3914 {
3915 #ifdef TARGET_SPARC64
3916     TCGv tmp = tcg_temp_new();
3917 
3918     tcg_gen_add_tl(tmp, s1, s2);
3919     tcg_gen_andi_tl(dst, tmp, -8);
3920     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3921 #else
3922     g_assert_not_reached();
3923 #endif
3924 }
3925 
3926 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3927 {
3928 #ifdef TARGET_SPARC64
3929     TCGv tmp = tcg_temp_new();
3930 
3931     tcg_gen_add_tl(tmp, s1, s2);
3932     tcg_gen_andi_tl(dst, tmp, -8);
3933     tcg_gen_neg_tl(tmp, tmp);
3934     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3935 #else
3936     g_assert_not_reached();
3937 #endif
3938 }
3939 
3940 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3941 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
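
/*
 * ALIGNADDR pairs with FALIGNDATA for misaligned 8-byte accesses.
 * A typical guest sequence (register choices are illustrative only):
 *     alignaddr %o0, %g0, %o1      ! %o1 = %o0 & ~7, GSR.align = %o0 & 7
 *     ldd       [%o1], %f0
 *     ldd       [%o1 + 8], %f2
 *     faligndata %f0, %f2, %f4     ! 8 unaligned bytes starting at %o0
 */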
3942 
3943 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3944 {
3945 #ifdef TARGET_SPARC64
3946     tcg_gen_add_tl(dst, s1, s2);
3947     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3948 #else
3949     g_assert_not_reached();
3950 #endif
3951 }
3952 
3953 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
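
/*
 * The deposit above writes the low 32 bits of the sum into GSR.mask
 * (bits 63:32 of GSR); BSHUFFLE later consumes that field as its
 * byte-selection control (used by the BSHUFFLE translation).
 */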
3954 
3955 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
3956 {
3957     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
3958     return true;
3959 }
3960 
3961 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
3962 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
3963 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
3964 
3965 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3966 {
3967     TCGv dst, src1, src2;
3968 
3969     /* Reject 64-bit shifts for sparc32. */
3970     if (avail_32(dc) && a->x) {
3971         return false;
3972     }
3973 
3974     src2 = tcg_temp_new();
3975     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3976     src1 = gen_load_gpr(dc, a->rs1);
3977     dst = gen_dest_gpr(dc, a->rd);
3978 
3979     if (l) {
3980         tcg_gen_shl_tl(dst, src1, src2);
3981         if (!a->x) {
3982             tcg_gen_ext32u_tl(dst, dst);
3983         }
3984     } else if (u) {
3985         if (!a->x) {
3986             tcg_gen_ext32u_tl(dst, src1);
3987             src1 = dst;
3988         }
3989         tcg_gen_shr_tl(dst, src1, src2);
3990     } else {
3991         if (!a->x) {
3992             tcg_gen_ext32s_tl(dst, src1);
3993             src1 = dst;
3994         }
3995         tcg_gen_sar_tl(dst, src1, src2);
3996     }
3997     gen_store_gpr(dc, a->rd, dst);
3998     return advance_pc(dc);
3999 }
4000 
4001 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4002 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4003 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4004 
4005 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4006 {
4007     TCGv dst, src1;
4008 
4009     /* Reject 64-bit shifts for sparc32. */
4010     if (avail_32(dc) && (a->x || a->i >= 32)) {
4011         return false;
4012     }
4013 
4014     src1 = gen_load_gpr(dc, a->rs1);
4015     dst = gen_dest_gpr(dc, a->rd);
4016 
4017     if (avail_32(dc) || a->x) {
4018         if (l) {
4019             tcg_gen_shli_tl(dst, src1, a->i);
4020         } else if (u) {
4021             tcg_gen_shri_tl(dst, src1, a->i);
4022         } else {
4023             tcg_gen_sari_tl(dst, src1, a->i);
4024         }
4025     } else {
4026         if (l) {
4027             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4028         } else if (u) {
4029             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4030         } else {
4031             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4032         }
4033     }
4034     gen_store_gpr(dc, a->rd, dst);
4035     return advance_pc(dc);
4036 }
4037 
4038 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4039 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4040 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
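
/*
 * On a 64-bit cpu the pre-v9 (!x) immediate forms reduce to single
 * TCG extract ops; a minimal sketch of the mapping above:
 *     srl %o0, 3, %o1   ->  extract(src, 3, 29)   zero-extended result
 *     sra %o0, 3, %o1   ->  sextract(src, 3, 29)  sign-extended result
 * i.e. bits 31..3 of the low word shifted into place in one operation.
 */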
4041 
4042 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4043 {
4044     /* For simplicity, we under-decoded the rs2 form. */
4045     if (!imm && rs2_or_imm & ~0x1f) {
4046         return NULL;
4047     }
4048     if (imm || rs2_or_imm == 0) {
4049         return tcg_constant_tl(rs2_or_imm);
4050     } else {
4051         return cpu_regs[rs2_or_imm];
4052     }
4053 }
4054 
4055 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4056 {
4057     TCGv dst = gen_load_gpr(dc, rd);
4058     TCGv c2 = tcg_constant_tl(cmp->c2);
4059 
4060     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4061     gen_store_gpr(dc, rd, dst);
4062     return advance_pc(dc);
4063 }
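
/*
 * Loading the old rd first lets the movcond keep it when the condition
 * is false: e.g. "movg %icc, %o1, %o0" leaves %o0 unchanged unless the
 * icc "greater" condition holds.
 */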
4064 
4065 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4066 {
4067     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4068     DisasCompare cmp;
4069 
4070     if (src2 == NULL) {
4071         return false;
4072     }
4073     gen_compare(&cmp, a->cc, a->cond, dc);
4074     return do_mov_cond(dc, &cmp, a->rd, src2);
4075 }
4076 
4077 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4078 {
4079     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4080     DisasCompare cmp;
4081 
4082     if (src2 == NULL) {
4083         return false;
4084     }
4085     gen_fcompare(&cmp, a->cc, a->cond);
4086     return do_mov_cond(dc, &cmp, a->rd, src2);
4087 }
4088 
4089 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4090 {
4091     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4092     DisasCompare cmp;
4093 
4094     if (src2 == NULL) {
4095         return false;
4096     }
4097     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4098         return false;
4099     }
4100     return do_mov_cond(dc, &cmp, a->rd, src2);
4101 }
4102 
4103 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4104                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4105 {
4106     TCGv src1, sum;
4107 
4108     /* For simplicity, we under-decoded the rs2 form. */
4109     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4110         return false;
4111     }
4112 
4113     /*
4114      * Always load the sum into a new temporary.
4115      * This is required to capture the value across a window change,
4116      * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4117      */
4118     sum = tcg_temp_new();
4119     src1 = gen_load_gpr(dc, a->rs1);
4120     if (a->imm || a->rs2_or_imm == 0) {
4121         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4122     } else {
4123         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4124     }
4125     return func(dc, a->rd, sum);
4126 }
4127 
4128 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4129 {
4130     /*
4131      * Preserve pc across advance, so that we can delay
4132      * the writeback to rd until after src is consumed.
4133      */
4134     target_ulong cur_pc = dc->pc;
4135 
4136     gen_check_align(dc, src, 3);
4137 
4138     gen_mov_pc_npc(dc);
4139     tcg_gen_mov_tl(cpu_npc, src);
4140     gen_address_mask(dc, cpu_npc);
4141     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4142 
4143     dc->npc = DYNAMIC_PC_LOOKUP;
4144     return true;
4145 }
4146 
4147 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
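
/*
 * JMPL writes the address of the jmpl itself to rd, so the usual
 * synthetic instructions all land here: "call %reg" is
 * "jmpl %reg, %o7", "ret" is "jmpl %i7 + 8, %g0" and "retl" is
 * "jmpl %o7 + 8, %g0".
 */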
4148 
4149 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4150 {
4151     if (!supervisor(dc)) {
4152         return raise_priv(dc);
4153     }
4154 
4155     gen_check_align(dc, src, 3);
4156 
4157     gen_mov_pc_npc(dc);
4158     tcg_gen_mov_tl(cpu_npc, src);
4159     gen_helper_rett(tcg_env);
4160 
4161     dc->npc = DYNAMIC_PC;
4162     return true;
4163 }
4164 
4165 TRANS(RETT, 32, do_add_special, a, do_rett)
4166 
4167 static bool do_return(DisasContext *dc, int rd, TCGv src)
4168 {
4169     gen_check_align(dc, src, 3);
4170     gen_helper_restore(tcg_env);
4171 
4172     gen_mov_pc_npc(dc);
4173     tcg_gen_mov_tl(cpu_npc, src);
4174     gen_address_mask(dc, cpu_npc);
4175 
4176     dc->npc = DYNAMIC_PC_LOOKUP;
4177     return true;
4178 }
4179 
4180 TRANS(RETURN, 64, do_add_special, a, do_return)
4181 
4182 static bool do_save(DisasContext *dc, int rd, TCGv src)
4183 {
4184     gen_helper_save(tcg_env);
4185     gen_store_gpr(dc, rd, src);
4186     return advance_pc(dc);
4187 }
4188 
4189 TRANS(SAVE, ALL, do_add_special, a, do_save)
4190 
4191 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4192 {
4193     gen_helper_restore(tcg_env);
4194     gen_store_gpr(dc, rd, src);
4195     return advance_pc(dc);
4196 }
4197 
4198 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4199 
4200 static bool do_done_retry(DisasContext *dc, bool done)
4201 {
4202     if (!supervisor(dc)) {
4203         return raise_priv(dc);
4204     }
4205     dc->npc = DYNAMIC_PC;
4206     dc->pc = DYNAMIC_PC;
4207     translator_io_start(&dc->base);
4208     if (done) {
4209         gen_helper_done(tcg_env);
4210     } else {
4211         gen_helper_retry(tcg_env);
4212     }
4213     return true;
4214 }
4215 
4216 TRANS(DONE, 64, do_done_retry, true)
4217 TRANS(RETRY, 64, do_done_retry, false)
4218 
4219 /*
4220  * Major opcode 11 -- load and store instructions
4221  */
4222 
4223 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4224 {
4225     TCGv addr, tmp = NULL;
4226 
4227     /* For simplicity, we under-decoded the rs2 form. */
4228     if (!imm && rs2_or_imm & ~0x1f) {
4229         return NULL;
4230     }
4231 
4232     addr = gen_load_gpr(dc, rs1);
4233     if (rs2_or_imm) {
4234         tmp = tcg_temp_new();
4235         if (imm) {
4236             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4237         } else {
4238             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4239         }
4240         addr = tmp;
4241     }
4242     if (AM_CHECK(dc)) {
4243         if (!tmp) {
4244             tmp = tcg_temp_new();
4245         }
4246         tcg_gen_ext32u_tl(tmp, addr);
4247         addr = tmp;
4248     }
4249     return addr;
4250 }
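
/*
 * All integer loads and stores funnel through gen_ldst_addr: the
 * effective address is rs1 + rs2 or rs1 + simm13, the add is skipped
 * entirely when the offset is zero, and when AM_CHECK indicates 32-bit
 * addressing (PSTATE.AM on sparc64) the sum is truncated to 32 bits
 * before use.
 */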
4251 
4252 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4253 {
4254     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4255     DisasASI da;
4256 
4257     if (addr == NULL) {
4258         return false;
4259     }
4260     da = resolve_asi(dc, a->asi, mop);
4261 
4262     reg = gen_dest_gpr(dc, a->rd);
4263     gen_ld_asi(dc, &da, reg, addr);
4264     gen_store_gpr(dc, a->rd, reg);
4265     return advance_pc(dc);
4266 }
4267 
4268 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4269 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4270 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4271 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4272 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4273 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4274 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4275 
4276 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4277 {
4278     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4279     DisasASI da;
4280 
4281     if (addr == NULL) {
4282         return false;
4283     }
4284     da = resolve_asi(dc, a->asi, mop);
4285 
4286     reg = gen_load_gpr(dc, a->rd);
4287     gen_st_asi(dc, &da, reg, addr);
4288     return advance_pc(dc);
4289 }
4290 
4291 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4292 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4293 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4294 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4295 
4296 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4297 {
4298     TCGv addr;
4299     DisasASI da;
4300 
4301     if (a->rd & 1) {
4302         return false;
4303     }
4304     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4305     if (addr == NULL) {
4306         return false;
4307     }
4308     da = resolve_asi(dc, a->asi, MO_TEUQ);
4309     gen_ldda_asi(dc, &da, addr, a->rd);
4310     return advance_pc(dc);
4311 }
4312 
4313 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4314 {
4315     TCGv addr;
4316     DisasASI da;
4317 
4318     if (a->rd & 1) {
4319         return false;
4320     }
4321     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4322     if (addr == NULL) {
4323         return false;
4324     }
4325     da = resolve_asi(dc, a->asi, MO_TEUQ);
4326     gen_stda_asi(dc, &da, addr, a->rd);
4327     return advance_pc(dc);
4328 }
4329 
4330 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4331 {
4332     TCGv addr, reg;
4333     DisasASI da;
4334 
4335     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4336     if (addr == NULL) {
4337         return false;
4338     }
4339     da = resolve_asi(dc, a->asi, MO_UB);
4340 
4341     reg = gen_dest_gpr(dc, a->rd);
4342     gen_ldstub_asi(dc, &da, reg, addr);
4343     gen_store_gpr(dc, a->rd, reg);
4344     return advance_pc(dc);
4345 }
4346 
4347 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4348 {
4349     TCGv addr, dst, src;
4350     DisasASI da;
4351 
4352     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4353     if (addr == NULL) {
4354         return false;
4355     }
4356     da = resolve_asi(dc, a->asi, MO_TEUL);
4357 
4358     dst = gen_dest_gpr(dc, a->rd);
4359     src = gen_load_gpr(dc, a->rd);
4360     gen_swap_asi(dc, &da, dst, src, addr);
4361     gen_store_gpr(dc, a->rd, dst);
4362     return advance_pc(dc);
4363 }
4364 
4365 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4366 {
4367     TCGv addr, o, n, c;
4368     DisasASI da;
4369 
4370     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4371     if (addr == NULL) {
4372         return false;
4373     }
4374     da = resolve_asi(dc, a->asi, mop);
4375 
4376     o = gen_dest_gpr(dc, a->rd);
4377     n = gen_load_gpr(dc, a->rd);
4378     c = gen_load_gpr(dc, a->rs2_or_imm);
4379     gen_cas_asi(dc, &da, o, n, c, addr);
4380     gen_store_gpr(dc, a->rd, o);
4381     return advance_pc(dc);
4382 }
4383 
4384 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4385 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
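
/*
 * CAS as wired above: the word at [rs1] is compared with rs2; if equal
 * it is replaced by the old rd, and rd always receives the loaded
 * value.  A sketch of a guest spinlock acquire (registers are
 * illustrative only):
 * retry:
 *     mov     1, %o1
 *     cas     [%o0], %g0, %o1      ! if [%o0] == 0, store 1
 *     brnz,pn %o1, retry           ! saw a nonzero lock word, retry
 */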
4386 
4387 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4388 {
4389     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4390     DisasASI da;
4391 
4392     if (addr == NULL) {
4393         return false;
4394     }
4395     if (gen_trap_ifnofpu(dc)) {
4396         return true;
4397     }
4398     if (sz == MO_128 && gen_trap_float128(dc)) {
4399         return true;
4400     }
4401     da = resolve_asi(dc, a->asi, MO_TE | sz);
4402     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4403     gen_update_fprs_dirty(dc, a->rd);
4404     return advance_pc(dc);
4405 }
4406 
4407 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4408 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4409 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4410 
4411 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4412 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4413 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4414 
4415 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4416 {
4417     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4418     DisasASI da;
4419 
4420     if (addr == NULL) {
4421         return false;
4422     }
4423     if (gen_trap_ifnofpu(dc)) {
4424         return true;
4425     }
4426     if (sz == MO_128 && gen_trap_float128(dc)) {
4427         return true;
4428     }
4429     da = resolve_asi(dc, a->asi, MO_TE | sz);
4430     gen_stf_asi(dc, &da, sz, addr, a->rd);
4431     return advance_pc(dc);
4432 }
4433 
4434 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4435 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4436 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4437 
4438 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4439 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4440 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4441 
4442 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4443 {
4444     if (!avail_32(dc)) {
4445         return false;
4446     }
4447     if (!supervisor(dc)) {
4448         return raise_priv(dc);
4449     }
4450     if (gen_trap_ifnofpu(dc)) {
4451         return true;
4452     }
4453     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4454     return true;
4455 }
4456 
4457 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4458 {
4459     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4460     TCGv_i32 tmp;
4461 
4462     if (addr == NULL) {
4463         return false;
4464     }
4465     if (gen_trap_ifnofpu(dc)) {
4466         return true;
4467     }
4468 
4469     tmp = tcg_temp_new_i32();
4470     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4471 
4472     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4473     /* LDFSR does not change FCC[1-3]. */
4474 
4475     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4476     return advance_pc(dc);
4477 }
4478 
4479 static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
4480 {
4481 #ifdef TARGET_SPARC64
4482     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4483     TCGv_i64 t64;
4484     TCGv_i32 lo, hi;
4485 
4486     if (addr == NULL) {
4487         return false;
4488     }
4489     if (gen_trap_ifnofpu(dc)) {
4490         return true;
4491     }
4492 
4493     t64 = tcg_temp_new_i64();
4494     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4495 
4496     lo = tcg_temp_new_i32();
4497     hi = cpu_fcc[3];
4498     tcg_gen_extr_i64_i32(lo, hi, t64);
4499     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4500     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4501     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4502     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4503 
4504     if (entire) {
4505         gen_helper_set_fsr_nofcc(tcg_env, lo);
4506     } else {
4507         gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4508     }
4509     return advance_pc(dc);
4510 #else
4511     return false;
4512 #endif
4513 }
4514 
4515 TRANS(LDXFSR, 64, do_ldxfsr, a, false)
4516 TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
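
/*
 * The split above follows the V9 FSR layout: fcc0 sits in bits 11:10
 * of the low word, while fcc1, fcc2 and fcc3 occupy bits 33:32, 35:34
 * and 37:36, hence extracting them from the high half with the shift
 * counts reduced by 32.
 */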
4517 
4518 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4519 {
4520     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4521     TCGv fsr;
4522 
4523     if (addr == NULL) {
4524         return false;
4525     }
4526     if (gen_trap_ifnofpu(dc)) {
4527         return true;
4528     }
4529 
4530     fsr = tcg_temp_new();
4531     gen_helper_get_fsr(fsr, tcg_env);
4532     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4533     return advance_pc(dc);
4534 }
4535 
4536 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4537 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4538 
4539 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4540 {
4541     if (gen_trap_ifnofpu(dc)) {
4542         return true;
4543     }
4544     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4545     return advance_pc(dc);
4546 }
4547 
4548 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4549 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4550 
4551 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4552 {
4553     if (gen_trap_ifnofpu(dc)) {
4554         return true;
4555     }
4556     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4557     return advance_pc(dc);
4558 }
4559 
4560 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4561 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4562 
4563 static bool do_ff(DisasContext *dc, arg_r_r *a,
4564                   void (*func)(TCGv_i32, TCGv_i32))
4565 {
4566     TCGv_i32 tmp;
4567 
4568     if (gen_trap_ifnofpu(dc)) {
4569         return true;
4570     }
4571 
4572     tmp = gen_load_fpr_F(dc, a->rs);
4573     func(tmp, tmp);
4574     gen_store_fpr_F(dc, a->rd, tmp);
4575     return advance_pc(dc);
4576 }
4577 
4578 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4579 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4580 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4581 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4582 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4583 
4584 static bool do_fd(DisasContext *dc, arg_r_r *a,
4585                   void (*func)(TCGv_i32, TCGv_i64))
4586 {
4587     TCGv_i32 dst;
4588     TCGv_i64 src;
4589 
4590     if (gen_trap_ifnofpu(dc)) {
4591         return true;
4592     }
4593 
4594     dst = tcg_temp_new_i32();
4595     src = gen_load_fpr_D(dc, a->rs);
4596     func(dst, src);
4597     gen_store_fpr_F(dc, a->rd, dst);
4598     return advance_pc(dc);
4599 }
4600 
4601 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4602 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4603 
4604 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4605                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4606 {
4607     TCGv_i32 tmp;
4608 
4609     if (gen_trap_ifnofpu(dc)) {
4610         return true;
4611     }
4612 
4613     tmp = gen_load_fpr_F(dc, a->rs);
4614     func(tmp, tcg_env, tmp);
4615     gen_store_fpr_F(dc, a->rd, tmp);
4616     return advance_pc(dc);
4617 }
4618 
4619 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4620 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4621 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4622 
4623 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4624                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4625 {
4626     TCGv_i32 dst;
4627     TCGv_i64 src;
4628 
4629     if (gen_trap_ifnofpu(dc)) {
4630         return true;
4631     }
4632 
4633     dst = tcg_temp_new_i32();
4634     src = gen_load_fpr_D(dc, a->rs);
4635     func(dst, tcg_env, src);
4636     gen_store_fpr_F(dc, a->rd, dst);
4637     return advance_pc(dc);
4638 }
4639 
4640 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4641 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4642 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4643 
4644 static bool do_dd(DisasContext *dc, arg_r_r *a,
4645                   void (*func)(TCGv_i64, TCGv_i64))
4646 {
4647     TCGv_i64 dst, src;
4648 
4649     if (gen_trap_ifnofpu(dc)) {
4650         return true;
4651     }
4652 
4653     dst = tcg_temp_new_i64();
4654     src = gen_load_fpr_D(dc, a->rs);
4655     func(dst, src);
4656     gen_store_fpr_D(dc, a->rd, dst);
4657     return advance_pc(dc);
4658 }
4659 
4660 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4661 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4662 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4663 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4664 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4665 
4666 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4667                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4668 {
4669     TCGv_i64 dst, src;
4670 
4671     if (gen_trap_ifnofpu(dc)) {
4672         return true;
4673     }
4674 
4675     dst = tcg_temp_new_i64();
4676     src = gen_load_fpr_D(dc, a->rs);
4677     func(dst, tcg_env, src);
4678     gen_store_fpr_D(dc, a->rd, dst);
4679     return advance_pc(dc);
4680 }
4681 
4682 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4683 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4684 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4685 
4686 static bool do_df(DisasContext *dc, arg_r_r *a,
4687                   void (*func)(TCGv_i64, TCGv_i32))
4688 {
4689     TCGv_i64 dst;
4690     TCGv_i32 src;
4691 
4692     if (gen_trap_ifnofpu(dc)) {
4693         return true;
4694     }
4695 
4696     dst = tcg_temp_new_i64();
4697     src = gen_load_fpr_F(dc, a->rs);
4698     func(dst, src);
4699     gen_store_fpr_D(dc, a->rd, dst);
4700     return advance_pc(dc);
4701 }
4702 
4703 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4704 
4705 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4706                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4707 {
4708     TCGv_i64 dst;
4709     TCGv_i32 src;
4710 
4711     if (gen_trap_ifnofpu(dc)) {
4712         return true;
4713     }
4714 
4715     dst = tcg_temp_new_i64();
4716     src = gen_load_fpr_F(dc, a->rs);
4717     func(dst, tcg_env, src);
4718     gen_store_fpr_D(dc, a->rd, dst);
4719     return advance_pc(dc);
4720 }
4721 
4722 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4723 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4724 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4725 
4726 static bool do_qq(DisasContext *dc, arg_r_r *a,
4727                   void (*func)(TCGv_i128, TCGv_i128))
4728 {
4729     TCGv_i128 t;
4730 
4731     if (gen_trap_ifnofpu(dc)) {
4732         return true;
4733     }
4734     if (gen_trap_float128(dc)) {
4735         return true;
4736     }
4737 
4738     gen_op_clear_ieee_excp_and_FTT();
4739     t = gen_load_fpr_Q(dc, a->rs);
4740     func(t, t);
4741     gen_store_fpr_Q(dc, a->rd, t);
4742     return advance_pc(dc);
4743 }
4744 
4745 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4746 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4747 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4748 
4749 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4750                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4751 {
4752     TCGv_i128 t;
4753 
4754     if (gen_trap_ifnofpu(dc)) {
4755         return true;
4756     }
4757     if (gen_trap_float128(dc)) {
4758         return true;
4759     }
4760 
4761     t = gen_load_fpr_Q(dc, a->rs);
4762     func(t, tcg_env, t);
4763     gen_store_fpr_Q(dc, a->rd, t);
4764     return advance_pc(dc);
4765 }
4766 
4767 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4768 
4769 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4770                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4771 {
4772     TCGv_i128 src;
4773     TCGv_i32 dst;
4774 
4775     if (gen_trap_ifnofpu(dc)) {
4776         return true;
4777     }
4778     if (gen_trap_float128(dc)) {
4779         return true;
4780     }
4781 
4782     src = gen_load_fpr_Q(dc, a->rs);
4783     dst = tcg_temp_new_i32();
4784     func(dst, tcg_env, src);
4785     gen_store_fpr_F(dc, a->rd, dst);
4786     return advance_pc(dc);
4787 }
4788 
4789 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4790 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4791 
4792 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4793                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4794 {
4795     TCGv_i128 src;
4796     TCGv_i64 dst;
4797 
4798     if (gen_trap_ifnofpu(dc)) {
4799         return true;
4800     }
4801     if (gen_trap_float128(dc)) {
4802         return true;
4803     }
4804 
4805     src = gen_load_fpr_Q(dc, a->rs);
4806     dst = tcg_temp_new_i64();
4807     func(dst, tcg_env, src);
4808     gen_store_fpr_D(dc, a->rd, dst);
4809     return advance_pc(dc);
4810 }
4811 
4812 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4813 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4814 
4815 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4816                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4817 {
4818     TCGv_i32 src;
4819     TCGv_i128 dst;
4820 
4821     if (gen_trap_ifnofpu(dc)) {
4822         return true;
4823     }
4824     if (gen_trap_float128(dc)) {
4825         return true;
4826     }
4827 
4828     src = gen_load_fpr_F(dc, a->rs);
4829     dst = tcg_temp_new_i128();
4830     func(dst, tcg_env, src);
4831     gen_store_fpr_Q(dc, a->rd, dst);
4832     return advance_pc(dc);
4833 }
4834 
4835 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4836 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4837 
4838 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4839                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4840 {
4841     TCGv_i64 src;
4842     TCGv_i128 dst;
4843 
4844     if (gen_trap_ifnofpu(dc)) {
4845         return true;
4846     }
4847     if (gen_trap_float128(dc)) {
4848         return true;
4849     }
4850 
4851     src = gen_load_fpr_D(dc, a->rs);
4852     dst = tcg_temp_new_i128();
4853     func(dst, tcg_env, src);
4854     gen_store_fpr_Q(dc, a->rd, dst);
4855     return advance_pc(dc);
4856 }
4857 
4858 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4859 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4860 
4861 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4862                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4863 {
4864     TCGv_i32 src1, src2;
4865 
4866     if (gen_trap_ifnofpu(dc)) {
4867         return true;
4868     }
4869 
4870     src1 = gen_load_fpr_F(dc, a->rs1);
4871     src2 = gen_load_fpr_F(dc, a->rs2);
4872     func(src1, src1, src2);
4873     gen_store_fpr_F(dc, a->rd, src1);
4874     return advance_pc(dc);
4875 }
4876 
4877 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4878 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4879 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4880 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4881 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4882 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4883 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4884 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4885 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4886 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4887 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4888 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4889 
4890 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4891 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4892 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4893 
4894 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4895 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4896 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4897 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4898 
4899 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4900                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4901 {
4902     TCGv_i32 src1, src2;
4903 
4904     if (gen_trap_ifnofpu(dc)) {
4905         return true;
4906     }
4907 
4908     src1 = gen_load_fpr_F(dc, a->rs1);
4909     src2 = gen_load_fpr_F(dc, a->rs2);
4910     func(src1, tcg_env, src1, src2);
4911     gen_store_fpr_F(dc, a->rd, src1);
4912     return advance_pc(dc);
4913 }
4914 
4915 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4916 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4917 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4918 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4919 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
4920 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4921 
4922 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4923                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4924 {
4925     TCGv_i64 dst;
4926     TCGv_i32 src1, src2;
4927 
4928     if (gen_trap_ifnofpu(dc)) {
4929         return true;
4930     }
4931 
4932     dst = tcg_temp_new_i64();
4933     src1 = gen_load_fpr_F(dc, a->rs1);
4934     src2 = gen_load_fpr_F(dc, a->rs2);
4935     func(dst, src1, src2);
4936     gen_store_fpr_D(dc, a->rd, dst);
4937     return advance_pc(dc);
4938 }
4939 
4940 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
4941 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
4942 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
4943 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
4944 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4945 
4946 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4947                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4948 {
4949     TCGv_i64 dst, src2;
4950     TCGv_i32 src1;
4951 
4952     if (gen_trap_ifnofpu(dc)) {
4953         return true;
4954     }
4955 
4956     dst = tcg_temp_new_i64();
4957     src1 = gen_load_fpr_F(dc, a->rs1);
4958     src2 = gen_load_fpr_D(dc, a->rs2);
4959     func(dst, src1, src2);
4960     gen_store_fpr_D(dc, a->rd, dst);
4961     return advance_pc(dc);
4962 }
4963 
4964 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4965 
4966 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
4967                         void (*func)(unsigned, uint32_t, uint32_t,
4968                                      uint32_t, uint32_t, uint32_t))
4969 {
4970     if (gen_trap_ifnofpu(dc)) {
4971         return true;
4972     }
4973 
4974     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
4975          gen_offset_fpr_D(a->rs2), 8, 8);
4976     return advance_pc(dc);
4977 }
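
/*
 * The (8, 8) arguments above are the gvec operand size and maximum
 * size in bytes, i.e. exactly one double register, so each VIS
 * partitioned op becomes a lane-wise vector op: FPADD16, for example,
 * is four 16-bit adds via tcg_gen_gvec_add at MO_16.
 */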
4978 
4979 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
4980 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
4981 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
4982 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
4983 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
4984 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
4985 
4986 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
4987 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
4988 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
4989 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
4990 
4991 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
4992 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
4993 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
4994 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
4995 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
4996 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
4997 
4998 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4999                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5000 {
5001     TCGv_i64 dst, src1, src2;
5002 
5003     if (gen_trap_ifnofpu(dc)) {
5004         return true;
5005     }
5006 
5007     dst = tcg_temp_new_i64();
5008     src1 = gen_load_fpr_D(dc, a->rs1);
5009     src2 = gen_load_fpr_D(dc, a->rs2);
5010     func(dst, src1, src2);
5011     gen_store_fpr_D(dc, a->rd, dst);
5012     return advance_pc(dc);
5013 }
5014 
5015 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5016 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5017 
5018 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5019 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5020 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5021 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5022 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5023 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5024 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5025 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5026 
5027 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5028 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
5029 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5030 
5031 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5032 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5033 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5034 
5035 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5036 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5037 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5038 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5039 
5040 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5041                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5042 {
5043     TCGv_i64 src1, src2;
5044     TCGv dst;
5045 
5046     if (gen_trap_ifnofpu(dc)) {
5047         return true;
5048     }
5049 
5050     dst = gen_dest_gpr(dc, a->rd);
5051     src1 = gen_load_fpr_D(dc, a->rs1);
5052     src2 = gen_load_fpr_D(dc, a->rs2);
5053     func(dst, src1, src2);
5054     gen_store_gpr(dc, a->rd, dst);
5055     return advance_pc(dc);
5056 }
5057 
5058 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5059 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5060 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5061 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5062 
5063 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5064 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5065 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5066 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5067 
5068 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5069 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5070 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5071 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5072 
5073 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5074                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5075 {
5076     TCGv_i64 dst, src1, src2;
5077 
5078     if (gen_trap_ifnofpu(dc)) {
5079         return true;
5080     }
5081 
5082     dst = tcg_temp_new_i64();
5083     src1 = gen_load_fpr_D(dc, a->rs1);
5084     src2 = gen_load_fpr_D(dc, a->rs2);
5085     func(dst, tcg_env, src1, src2);
5086     gen_store_fpr_D(dc, a->rd, dst);
5087     return advance_pc(dc);
5088 }
5089 
5090 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5091 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5092 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5093 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5094 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5095 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5096 
5097 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5098 {
5099     TCGv_i64 dst;
5100     TCGv_i32 src1, src2;
5101 
5102     if (gen_trap_ifnofpu(dc)) {
5103         return true;
5104     }
5105     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5106         return raise_unimpfpop(dc);
5107     }
5108 
5109     dst = tcg_temp_new_i64();
5110     src1 = gen_load_fpr_F(dc, a->rs1);
5111     src2 = gen_load_fpr_F(dc, a->rs2);
5112     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5113     gen_store_fpr_D(dc, a->rd, dst);
5114     return advance_pc(dc);
5115 }
5116 
5117 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5118 {
5119     TCGv_i64 dst;
5120     TCGv_i32 src1, src2;
5121 
5122     if (!avail_VIS3(dc)) {
5123         return false;
5124     }
5125     if (gen_trap_ifnofpu(dc)) {
5126         return true;
5127     }
5128     dst = tcg_temp_new_i64();
5129     src1 = gen_load_fpr_F(dc, a->rs1);
5130     src2 = gen_load_fpr_F(dc, a->rs2);
5131     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5132     gen_store_fpr_D(dc, a->rd, dst);
5133     return advance_pc(dc);
5134 }
5135 
5136 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5137                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5138 {
5139     TCGv_i32 dst, src1, src2, src3;
5140 
5141     if (gen_trap_ifnofpu(dc)) {
5142         return true;
5143     }
5144 
5145     src1 = gen_load_fpr_F(dc, a->rs1);
5146     src2 = gen_load_fpr_F(dc, a->rs2);
5147     src3 = gen_load_fpr_F(dc, a->rs3);
5148     dst = tcg_temp_new_i32();
5149     func(dst, src1, src2, src3);
5150     gen_store_fpr_F(dc, a->rd, dst);
5151     return advance_pc(dc);
5152 }
5153 
5154 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5155 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5156 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5157 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5158 
5159 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5160                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5161 {
5162     TCGv_i64 dst, src1, src2, src3;
5163 
5164     if (gen_trap_ifnofpu(dc)) {
5165         return true;
5166     }
5167 
5168     dst  = tcg_temp_new_i64();
5169     src1 = gen_load_fpr_D(dc, a->rs1);
5170     src2 = gen_load_fpr_D(dc, a->rs2);
5171     src3 = gen_load_fpr_D(dc, a->rs3);
5172     func(dst, src1, src2, src3);
5173     gen_store_fpr_D(dc, a->rd, dst);
5174     return advance_pc(dc);
5175 }
5176 
5177 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5178 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5179 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5180 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5181 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
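
/*
 * The FMAF ops compute rd = +/-(rs1 * rs2) +/- rs3 with a single
 * rounding step (e.g. FMADDd is fused rs1 * rs2 + rs3), which is why
 * they use dedicated gen_op_* wrappers instead of a mul/add pair.
 */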
5182 
5183 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5184                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5185 {
5186     TCGv_i128 src1, src2;
5187 
5188     if (gen_trap_ifnofpu(dc)) {
5189         return true;
5190     }
5191     if (gen_trap_float128(dc)) {
5192         return true;
5193     }
5194 
5195     src1 = gen_load_fpr_Q(dc, a->rs1);
5196     src2 = gen_load_fpr_Q(dc, a->rs2);
5197     func(src1, tcg_env, src1, src2);
5198     gen_store_fpr_Q(dc, a->rd, src1);
5199     return advance_pc(dc);
5200 }
5201 
5202 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5203 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5204 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5205 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5206 
5207 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5208 {
5209     TCGv_i64 src1, src2;
5210     TCGv_i128 dst;
5211 
5212     if (gen_trap_ifnofpu(dc)) {
5213         return true;
5214     }
5215     if (gen_trap_float128(dc)) {
5216         return true;
5217     }
5218 
5219     src1 = gen_load_fpr_D(dc, a->rs1);
5220     src2 = gen_load_fpr_D(dc, a->rs2);
5221     dst = tcg_temp_new_i128();
5222     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5223     gen_store_fpr_Q(dc, a->rd, dst);
5224     return advance_pc(dc);
5225 }
5226 
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

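/*
 * FMOVcc on the integer condition codes; do_fmovfcc below is the
 * analogous move keyed to a floating-point condition code field.
 */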
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

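/*
 * FCMP writes one of the %fcc fields.  Pre-V9 CPUs have only the single
 * fcc in the FSR, hence the a->cc != 0 rejection on 32-bit; FCMPE
 * additionally signals invalid-operation on quiet NaN operands.  The
 * double and quad variants below follow the same pattern.
 */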
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

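/*
 * VIS3 lightweight compares: the helpers take no tcg_env, so they set
 * the fcc field directly and cannot raise IEEE traps.
 */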
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

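/*
 * VIS3B moves between the integer and FP register files.  Rather than
 * going through gen_load/store_fpr, these access the FP registers by
 * their byte offset within CPUSPARCState.
 */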
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)

static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)

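/*
 * Core translation loop callbacks.  The TB's cs_base slot is reused to
 * carry the next-PC, since SPARC's delay slots mean that (pc, npc)
 * together define the CPU's control-flow state.
 */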
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * If we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page.
     */
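    /*
     * TARGET_PAGE_MASK has all bits above the page offset set, so
     * pc_first | TARGET_PAGE_MASK is the (negative) byte offset from
     * the end of the page; negating and dividing by the 4-byte insn
     * size yields the number of insn slots left.  E.g. with
     * (hypothetical) 4 KiB pages, pc_first ending in 0xff8 gives
     * pc_first | MASK == -8, so bound == 2.
     */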
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

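/*
 * The low bits of a dynamic npc encode a marker (JUMP_PC, DYNAMIC_PC,
 * DYNAMIC_PC_LOOKUP).  Record a canonical form in the insn_start op so
 * that sparc_restore_state_to_opc below can reconstruct env->npc.
 */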
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

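/*
 * Translate one insn.  A decode failure raises an illegal-instruction
 * trap; otherwise, if dc->pc no longer matches the linear pc_next, the
 * insn changed control flow and the TB must end.
 */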
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

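/*
 * Close the TB.  With both pc and npc static we can chain directly via
 * goto_tb.  Otherwise DYNAMIC_PC_LOOKUP still permits
 * tcg_gen_lookup_and_goto_ptr(), while DYNAMIC_PC forces a plain
 * tcg_gen_exit_tb().
 */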
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

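    /*
     * Emit the out-of-line exception paths queued by conditional traps
     * in delay slots.  A static next-PC (low bits clear) is stored
     * explicitly; otherwise cpu_npc already holds the dynamic value.
     */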
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

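/*
 * Allocate the TCG globals that mirror CPUSPARCState fields.  %g0 is
 * hardwired to zero, so cpu_regs[0] stays NULL and is special-cased by
 * its users; the windowed registers (%o, %l, %i) are addressed
 * indirectly through regwptr.
 */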
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}

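/*
 * Recover (pc, npc) from the pair recorded by sparc_tr_insn_start when
 * an exception unwinds mid-TB.  A JUMP_PC marker means a conditional
 * branch's outcome was still pending; pick the branch target or the
 * fall-through using env->cond.
 */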
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
