xref: /openbmc/qemu/target/sparc/translate.c (revision fbc5c8d4e8f10fdb780c450aa49b503e6d592cc6)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
93 # define MAXTL_MASK                             0
94 #endif
95 
96 /* Dynamic PC, must exit to main loop. */
97 #define DYNAMIC_PC         1
98 /* Dynamic PC, one of two jump_pc[] values, selected by the branch condition. */
99 #define JUMP_PC            2
100 /* Dynamic PC, may lookup next TB. */
101 #define DYNAMIC_PC_LOOKUP  3
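/*
 * Instruction addresses are always 4-byte aligned, so no real pc/npc
 * value has a non-zero low two bits.  The sentinels above exploit this:
 * code such as save_npc() tests "dc->npc & 3" to distinguish them from
 * concrete addresses.
 */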
102 
103 #define DISAS_EXIT  DISAS_TARGET_0
104 
105 /* global register indexes */
106 static TCGv_ptr cpu_regwptr;
107 static TCGv cpu_pc, cpu_npc;
108 static TCGv cpu_regs[32];
109 static TCGv cpu_y;
110 static TCGv cpu_tbr;
111 static TCGv cpu_cond;
112 static TCGv cpu_cc_N;
113 static TCGv cpu_cc_V;
114 static TCGv cpu_icc_Z;
115 static TCGv cpu_icc_C;
116 #ifdef TARGET_SPARC64
117 static TCGv cpu_xcc_Z;
118 static TCGv cpu_xcc_C;
119 static TCGv_i32 cpu_fprs;
120 static TCGv cpu_gsr;
121 #else
122 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
123 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
124 #endif
125 
126 #ifdef TARGET_SPARC64
127 #define cpu_cc_Z  cpu_xcc_Z
128 #define cpu_cc_C  cpu_xcc_C
129 #else
130 #define cpu_cc_Z  cpu_icc_Z
131 #define cpu_cc_C  cpu_icc_C
132 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
133 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
134 #endif
135 
136 /* Floating point comparison registers */
137 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
138 
139 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
140 #ifdef TARGET_SPARC64
141 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
142 # define env64_field_offsetof(X)  env_field_offsetof(X)
143 #else
144 # define env32_field_offsetof(X)  env_field_offsetof(X)
145 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
146 #endif
147 
148 typedef struct DisasCompare {
149     TCGCond cond;
150     TCGv c1;
151     int c2;
152 } DisasCompare;
153 
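/*
 * Exceptions raised from a delay slot or from a conditional path cannot
 * be emitted inline.  delay_exceptionv() records one of these entries
 * and returns a label to branch to; the actual raise, restoring the
 * pc/npc saved below, is emitted out of line once the rest of the
 * translation block has been generated.
 */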
154 typedef struct DisasDelayException {
155     struct DisasDelayException *next;
156     TCGLabel *lab;
157     TCGv_i32 excp;
158     /* Saved state at parent insn. */
159     target_ulong pc;
160     target_ulong npc;
161 } DisasDelayException;
162 
163 typedef struct DisasContext {
164     DisasContextBase base;
165     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
166     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
167 
168     /* Used when JUMP_PC value is used. */
169     DisasCompare jump;
170     target_ulong jump_pc[2];
171 
172     int mem_idx;
173     bool cpu_cond_live;
174     bool fpu_enabled;
175     bool address_mask_32bit;
176 #ifndef CONFIG_USER_ONLY
177     bool supervisor;
178 #ifdef TARGET_SPARC64
179     bool hypervisor;
180 #endif
181 #endif
182 
183     sparc_def_t *def;
184 #ifdef TARGET_SPARC64
185     int fprs_dirty;
186     int asi;
187 #endif
188     DisasDelayException *delay_excp_list;
189 } DisasContext;
190 
191 // This macro uses non-native bit order, i.e. bit 0 is the most-significant bit
192 #define GET_FIELD(X, FROM, TO)                                  \
193     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
194 
195 // This macro uses the order in the manuals, i.e. bit 0 is 2^0 (the LSB)
196 #define GET_FIELD_SP(X, FROM, TO)               \
197     GET_FIELD(X, 31 - (TO), 31 - (FROM))
198 
199 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
200 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
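/*
 * Example: for a 32-bit insn word X, GET_FIELD(X, 0, 1) yields
 * (X >> 30) & 3, the two most-significant bits (the V8 "op" field),
 * while GET_FIELD_SP(X, 0, 1) yields X & 3, the two least-significant
 * bits.
 */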
201 
202 #define UA2005_HTRAP_MASK 0xff
203 #define V8_TRAP_MASK 0x7f
204 
205 #define IS_IMM (insn & (1<<13))
206 
207 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
208 {
209 #if defined(TARGET_SPARC64)
210     int bit = (rd < 32) ? 1 : 2;
211     /* If we know we've already set this bit within the TB,
212        we can avoid setting it again.  */
213     if (!(dc->fprs_dirty & bit)) {
214         dc->fprs_dirty |= bit;
215         tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
216     }
217 #endif
218 }
219 
220 /* floating point registers moves */
221 
222 static int gen_offset_fpr_F(unsigned int reg)
223 {
224     int ret;
225 
226     tcg_debug_assert(reg < 32);
227     ret = offsetof(CPUSPARCState, fpr[reg / 2]);
228     if (reg & 1) {
229         ret += offsetof(CPU_DoubleU, l.lower);
230     } else {
231         ret += offsetof(CPU_DoubleU, l.upper);
232     }
233     return ret;
234 }
235 
236 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
237 {
238     TCGv_i32 ret = tcg_temp_new_i32();
239     tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
240     return ret;
241 }
242 
243 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
244 {
245     tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
246     gen_update_fprs_dirty(dc, dst);
247 }
248 
249 static int gen_offset_fpr_D(unsigned int reg)
250 {
251     tcg_debug_assert(reg < 64);
252     tcg_debug_assert(reg % 2 == 0);
253     return offsetof(CPUSPARCState, fpr[reg / 2]);
254 }
255 
256 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
257 {
258     TCGv_i64 ret = tcg_temp_new_i64();
259     tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
260     return ret;
261 }
262 
263 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
264 {
265     tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
266     gen_update_fprs_dirty(dc, dst);
267 }
268 
269 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
270 {
271     TCGv_i128 ret = tcg_temp_new_i128();
272     TCGv_i64 h = gen_load_fpr_D(dc, src);
273     TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
274 
275     tcg_gen_concat_i64_i128(ret, l, h);
276     return ret;
277 }
278 
279 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
280 {
281     TCGv_i64 h = tcg_temp_new_i64();
282     TCGv_i64 l = tcg_temp_new_i64();
283 
284     tcg_gen_extr_i128_i64(l, h, v);
285     gen_store_fpr_D(dc, dst, h);
286     gen_store_fpr_D(dc, dst + 2, l);
287 }
288 
289 /* moves */
290 #ifdef CONFIG_USER_ONLY
291 #define supervisor(dc) 0
292 #define hypervisor(dc) 0
293 #else
294 #ifdef TARGET_SPARC64
295 #define hypervisor(dc) (dc->hypervisor)
296 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
297 #else
298 #define supervisor(dc) (dc->supervisor)
299 #define hypervisor(dc) 0
300 #endif
301 #endif
302 
303 #if !defined(TARGET_SPARC64)
304 # define AM_CHECK(dc)  false
305 #elif defined(TARGET_ABI32)
306 # define AM_CHECK(dc)  true
307 #elif defined(CONFIG_USER_ONLY)
308 # define AM_CHECK(dc)  false
309 #else
310 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
311 #endif
312 
313 static void gen_address_mask(DisasContext *dc, TCGv addr)
314 {
315     if (AM_CHECK(dc)) {
316         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
317     }
318 }
319 
320 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
321 {
322     return AM_CHECK(dc) ? (uint32_t)addr : addr;
323 }
324 
325 static TCGv gen_load_gpr(DisasContext *dc, int reg)
326 {
327     if (reg > 0) {
328         assert(reg < 32);
329         return cpu_regs[reg];
330     } else {
331         TCGv t = tcg_temp_new();
332         tcg_gen_movi_tl(t, 0);
333         return t;
334     }
335 }
336 
337 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
338 {
339     if (reg > 0) {
340         assert(reg < 32);
341         tcg_gen_mov_tl(cpu_regs[reg], v);
342     }
343 }
344 
345 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
346 {
347     if (reg > 0) {
348         assert(reg < 32);
349         return cpu_regs[reg];
350     } else {
351         return tcg_temp_new();
352     }
353 }
354 
355 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
356 {
357     return translator_use_goto_tb(&s->base, pc) &&
358            translator_use_goto_tb(&s->base, npc);
359 }
360 
361 static void gen_goto_tb(DisasContext *s, int tb_num,
362                         target_ulong pc, target_ulong npc)
363 {
364     if (use_goto_tb(s, pc, npc)) {
365         /* jump to same page: we can use a direct jump */
366         tcg_gen_goto_tb(tb_num);
367         tcg_gen_movi_tl(cpu_pc, pc);
368         tcg_gen_movi_tl(cpu_npc, npc);
369         tcg_gen_exit_tb(s->base.tb, tb_num);
370     } else {
371         /* jump to another page: we can use an indirect jump */
372         tcg_gen_movi_tl(cpu_pc, pc);
373         tcg_gen_movi_tl(cpu_npc, npc);
374         tcg_gen_lookup_and_goto_ptr();
375     }
376 }
377 
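/*
 * Return the icc carry as a 0/1 value.  On a 64-bit target,
 * gen_op_addcc_int/gen_op_subcc_int leave the carry out of bit 31 in
 * bit 32 of cpu_icc_C, so extract that bit; on a 32-bit target,
 * cpu_icc_C already holds 0 or 1.
 */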
378 static TCGv gen_carry32(void)
379 {
380     if (TARGET_LONG_BITS == 64) {
381         TCGv t = tcg_temp_new();
382         tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
383         return t;
384     }
385     return cpu_icc_C;
386 }
387 
388 static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
389 {
390     TCGv z = tcg_constant_tl(0);
391 
392     if (cin) {
393         tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
394         tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
395     } else {
396         tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
397     }
398     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
399     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
400     tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
401     if (TARGET_LONG_BITS == 64) {
402         /*
403          * Carry-in to bit 32 is result ^ src1 ^ src2.
404          * We already have the src xor term in Z, from computation of V.
405          */
406         tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
407         tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
408     }
409     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
410     tcg_gen_mov_tl(dst, cpu_cc_N);
411 }
412 
413 static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
414 {
415     gen_op_addcc_int(dst, src1, src2, NULL);
416 }
417 
418 static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
419 {
420     TCGv t = tcg_temp_new();
421 
422     /* Save the tag bits around modification of dst. */
423     tcg_gen_or_tl(t, src1, src2);
424 
425     gen_op_addcc(dst, src1, src2);
426 
427     /* Incorporate tag bits into icc.V */
428     tcg_gen_andi_tl(t, t, 3);
429     tcg_gen_neg_tl(t, t);
430     tcg_gen_ext32u_tl(t, t);
431     tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
432 }
433 
434 static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
435 {
436     tcg_gen_add_tl(dst, src1, src2);
437     tcg_gen_add_tl(dst, dst, gen_carry32());
438 }
439 
440 static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
441 {
442     gen_op_addcc_int(dst, src1, src2, gen_carry32());
443 }
444 
445 static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
446 {
447     tcg_gen_add_tl(dst, src1, src2);
448     tcg_gen_add_tl(dst, dst, cpu_cc_C);
449 }
450 
451 static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
452 {
453     gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
454 }
455 
456 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
457 {
458     TCGv z = tcg_constant_tl(0);
459 
460     if (cin) {
461         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
462         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
463     } else {
464         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
465     }
466     tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
467     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
468     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
469     tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
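    /*
     * As with addition, the borrow into bit 32 is result ^ src1 ^ src2,
     * and Z currently holds the src1 ^ src2 term from computing V.
     */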
470 #ifdef TARGET_SPARC64
471     tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
472     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
473 #endif
474     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
475     tcg_gen_mov_tl(dst, cpu_cc_N);
476 }
477 
478 static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
479 {
480     gen_op_subcc_int(dst, src1, src2, NULL);
481 }
482 
483 static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
484 {
485     TCGv t = tcg_temp_new();
486 
487     /* Save the tag bits around modification of dst. */
488     tcg_gen_or_tl(t, src1, src2);
489 
490     gen_op_subcc(dst, src1, src2);
491 
492     /* Incorporate tag bits into icc.V */
493     tcg_gen_andi_tl(t, t, 3);
494     tcg_gen_neg_tl(t, t);
495     tcg_gen_ext32u_tl(t, t);
496     tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
497 }
498 
499 static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
500 {
501     tcg_gen_sub_tl(dst, src1, src2);
502     tcg_gen_sub_tl(dst, dst, gen_carry32());
503 }
504 
505 static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
506 {
507     gen_op_subcc_int(dst, src1, src2, gen_carry32());
508 }
509 
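/*
 * MULScc: one step of the V8 iterative 32x32 multiply.  Each step adds
 * the multiplicand (src2) if the low bit of Y is set, shifts Y right by
 * one inserting the low bit of src1, and shifts src1 right by one
 * inserting N ^ V of the current condition codes.  Per the V8 manual, a
 * sequence of 32 such steps (plus a final correction for signed
 * operands) yields the full 64-bit product across the result and Y.
 */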
510 static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
511 {
512     TCGv zero = tcg_constant_tl(0);
513     TCGv one = tcg_constant_tl(1);
514     TCGv t_src1 = tcg_temp_new();
515     TCGv t_src2 = tcg_temp_new();
516     TCGv t0 = tcg_temp_new();
517 
518     tcg_gen_ext32u_tl(t_src1, src1);
519     tcg_gen_ext32u_tl(t_src2, src2);
520 
521     /*
522      * if (!(env->y & 1))
523      *   src2 = 0;
524      */
525     tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);
526 
527     /*
528      * b2 = src1 & 1;
529      * y = (b2 << 31) | (y >> 1);
530      */
531     tcg_gen_extract_tl(t0, cpu_y, 1, 31);
532     tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);
533 
534     // b1 = N ^ V;
535     tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);
536 
537     /*
538      * src1 = (b1 << 31) | (src1 >> 1)
539      */
540     tcg_gen_andi_tl(t0, t0, 1u << 31);
541     tcg_gen_shri_tl(t_src1, t_src1, 1);
542     tcg_gen_or_tl(t_src1, t_src1, t0);
543 
544     gen_op_addcc(dst, t_src1, t_src2);
545 }
546 
547 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
548 {
549 #if TARGET_LONG_BITS == 32
550     if (sign_ext) {
551         tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
552     } else {
553         tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
554     }
555 #else
556     TCGv t0 = tcg_temp_new_i64();
557     TCGv t1 = tcg_temp_new_i64();
558 
559     if (sign_ext) {
560         tcg_gen_ext32s_i64(t0, src1);
561         tcg_gen_ext32s_i64(t1, src2);
562     } else {
563         tcg_gen_ext32u_i64(t0, src1);
564         tcg_gen_ext32u_i64(t1, src2);
565     }
566 
567     tcg_gen_mul_i64(dst, t0, t1);
568     tcg_gen_shri_i64(cpu_y, dst, 32);
569 #endif
570 }
571 
572 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
573 {
574     /* zero-extend truncated operands before multiplication */
575     gen_op_multiply(dst, src1, src2, 0);
576 }
577 
578 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
579 {
580     /* sign-extend truncated operands before multiplication */
581     gen_op_multiply(dst, src1, src2, 1);
582 }
583 
584 static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
585 {
586 #ifdef TARGET_SPARC64
587     gen_helper_sdiv(dst, tcg_env, src1, src2);
588     tcg_gen_ext32s_tl(dst, dst);
589 #else
590     TCGv_i64 t64 = tcg_temp_new_i64();
591     gen_helper_sdiv(t64, tcg_env, src1, src2);
592     tcg_gen_trunc_i64_tl(dst, t64);
593 #endif
594 }
595 
596 static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
597 {
598     TCGv_i64 t64;
599 
600 #ifdef TARGET_SPARC64
601     t64 = cpu_cc_V;
602 #else
603     t64 = tcg_temp_new_i64();
604 #endif
605 
606     gen_helper_udiv(t64, tcg_env, src1, src2);
607 
608 #ifdef TARGET_SPARC64
609     tcg_gen_ext32u_tl(cpu_cc_N, t64);
610     tcg_gen_shri_tl(cpu_cc_V, t64, 32);
611     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
612     tcg_gen_movi_tl(cpu_icc_C, 0);
613 #else
614     tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
615 #endif
616     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
617     tcg_gen_movi_tl(cpu_cc_C, 0);
618     tcg_gen_mov_tl(dst, cpu_cc_N);
619 }
620 
621 static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
622 {
623     TCGv_i64 t64;
624 
625 #ifdef TARGET_SPARC64
626     t64 = cpu_cc_V;
627 #else
628     t64 = tcg_temp_new_i64();
629 #endif
630 
631     gen_helper_sdiv(t64, tcg_env, src1, src2);
632 
633 #ifdef TARGET_SPARC64
634     tcg_gen_ext32s_tl(cpu_cc_N, t64);
635     tcg_gen_shri_tl(cpu_cc_V, t64, 32);
636     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
637     tcg_gen_movi_tl(cpu_icc_C, 0);
638 #else
639     tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
640 #endif
641     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
642     tcg_gen_movi_tl(cpu_cc_C, 0);
643     tcg_gen_mov_tl(dst, cpu_cc_N);
644 }
645 
646 static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
647 {
648     gen_helper_taddcctv(dst, tcg_env, src1, src2);
649 }
650 
651 static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
652 {
653     gen_helper_tsubcctv(dst, tcg_env, src1, src2);
654 }
655 
656 static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
657 {
658     tcg_gen_ctpop_tl(dst, src2);
659 }
660 
661 #ifndef TARGET_SPARC64
662 static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
663 {
664     g_assert_not_reached();
665 }
666 #endif
667 
668 static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
669 {
670     gen_helper_array8(dst, src1, src2);
671     tcg_gen_shli_tl(dst, dst, 1);
672 }
673 
674 static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
675 {
676     gen_helper_array8(dst, src1, src2);
677     tcg_gen_shli_tl(dst, dst, 2);
678 }
679 
680 static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
681 {
682 #ifdef TARGET_SPARC64
683     gen_helper_fpack16(dst, cpu_gsr, src);
684 #else
685     g_assert_not_reached();
686 #endif
687 }
688 
689 static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
690 {
691 #ifdef TARGET_SPARC64
692     gen_helper_fpackfix(dst, cpu_gsr, src);
693 #else
694     g_assert_not_reached();
695 #endif
696 }
697 
698 static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
699 {
700 #ifdef TARGET_SPARC64
701     gen_helper_fpack32(dst, cpu_gsr, src1, src2);
702 #else
703     g_assert_not_reached();
704 #endif
705 }
706 
707 static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
708 {
709     TCGv_i32 t[2];
710 
711     for (int i = 0; i < 2; i++) {
712         TCGv_i32 u = tcg_temp_new_i32();
713         TCGv_i32 v = tcg_temp_new_i32();
714 
715         tcg_gen_sextract_i32(u, src1, i * 16, 16);
716         tcg_gen_sextract_i32(v, src2, i * 16, 16);
717         tcg_gen_add_i32(u, u, v);
718         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
719         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
720         t[i] = u;
721     }
722     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
723 }
724 
725 static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
726 {
727     TCGv_i32 t[2];
728 
729     for (int i = 0; i < 2; i++) {
730         TCGv_i32 u = tcg_temp_new_i32();
731         TCGv_i32 v = tcg_temp_new_i32();
732 
733         tcg_gen_sextract_i32(u, src1, i * 16, 16);
734         tcg_gen_sextract_i32(v, src2, i * 16, 16);
735         tcg_gen_sub_i32(u, u, v);
736         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
737         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
738         t[i] = u;
739     }
740     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
741 }
742 
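/*
 * Branchless 32-bit saturating add (and the analogous subtract below):
 * t is precomputed as INT32_MIN when the raw result r is non-negative
 * and INT32_MAX when r is negative, i.e. the bound whose sign is
 * opposite that of r.  Signed overflow (v < 0) means r came out with
 * the wrong sign, so in that case the bound t is the correctly
 * saturated value and is selected instead of r.
 */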
743 static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
744 {
745     TCGv_i32 r = tcg_temp_new_i32();
746     TCGv_i32 t = tcg_temp_new_i32();
747     TCGv_i32 v = tcg_temp_new_i32();
748     TCGv_i32 z = tcg_constant_i32(0);
749 
750     tcg_gen_add_i32(r, src1, src2);
751     tcg_gen_xor_i32(t, src1, src2);
752     tcg_gen_xor_i32(v, r, src2);
753     tcg_gen_andc_i32(v, v, t);
754 
755     tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
756     tcg_gen_addi_i32(t, t, INT32_MAX);
757 
758     tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
759 }
760 
761 static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
762 {
763     TCGv_i32 r = tcg_temp_new_i32();
764     TCGv_i32 t = tcg_temp_new_i32();
765     TCGv_i32 v = tcg_temp_new_i32();
766     TCGv_i32 z = tcg_constant_i32(0);
767 
768     tcg_gen_sub_i32(r, src1, src2);
769     tcg_gen_xor_i32(t, src1, src2);
770     tcg_gen_xor_i32(v, r, src1);
771     tcg_gen_and_i32(v, v, t);
772 
773     tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
774     tcg_gen_addi_i32(t, t, INT32_MAX);
775 
776     tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
777 }
778 
779 static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
780 {
781 #ifdef TARGET_SPARC64
782     TCGv t1, t2, shift;
783 
784     t1 = tcg_temp_new();
785     t2 = tcg_temp_new();
786     shift = tcg_temp_new();
787 
788     tcg_gen_andi_tl(shift, cpu_gsr, 7);
789     tcg_gen_shli_tl(shift, shift, 3);
790     tcg_gen_shl_tl(t1, s1, shift);
791 
792     /*
793      * A shift of 64 does not produce 0 in TCG.  Divide this into a
794      * shift of (up to 63) followed by a constant shift of 1.
795      */
796     tcg_gen_xori_tl(shift, shift, 63);
797     tcg_gen_shr_tl(t2, s2, shift);
798     tcg_gen_shri_tl(t2, t2, 1);
799 
800     tcg_gen_or_tl(dst, t1, t2);
801 #else
802     g_assert_not_reached();
803 #endif
804 }
805 
806 static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
807 {
808 #ifdef TARGET_SPARC64
809     gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
810 #else
811     g_assert_not_reached();
812 #endif
813 }
814 
815 static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
816 {
817     tcg_gen_ext16s_i32(src2, src2);
818     gen_helper_fmul8x16a(dst, src1, src2);
819 }
820 
821 static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
822 {
823     tcg_gen_sari_i32(src2, src2, 16);
824     gen_helper_fmul8x16a(dst, src1, src2);
825 }
826 
827 static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
828 {
829     TCGv_i32 t0 = tcg_temp_new_i32();
830     TCGv_i32 t1 = tcg_temp_new_i32();
831     TCGv_i32 t2 = tcg_temp_new_i32();
832 
833     tcg_gen_ext8u_i32(t0, src1);
834     tcg_gen_ext16s_i32(t1, src2);
835     tcg_gen_mul_i32(t0, t0, t1);
836 
837     tcg_gen_extract_i32(t1, src1, 16, 8);
838     tcg_gen_sextract_i32(t2, src2, 16, 16);
839     tcg_gen_mul_i32(t1, t1, t2);
840 
841     tcg_gen_concat_i32_i64(dst, t0, t1);
842 }
843 
844 static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
845 {
846     TCGv_i32 t0 = tcg_temp_new_i32();
847     TCGv_i32 t1 = tcg_temp_new_i32();
848     TCGv_i32 t2 = tcg_temp_new_i32();
849 
850     /*
851      * The insn description talks about extracting the upper 8 bits
852      * of the signed 16-bit input rs1, performing the multiply, then
853      * shifting left by 8 bits.  Instead, zap the lower 8 bits of
854      * the rs1 input, which avoids the need for two shifts.
855      */
856     tcg_gen_ext16s_i32(t0, src1);
857     tcg_gen_andi_i32(t0, t0, ~0xff);
858     tcg_gen_ext16s_i32(t1, src2);
859     tcg_gen_mul_i32(t0, t0, t1);
860 
861     tcg_gen_sextract_i32(t1, src1, 16, 16);
862     tcg_gen_andi_i32(t1, t1, ~0xff);
863     tcg_gen_sextract_i32(t2, src2, 16, 16);
864     tcg_gen_mul_i32(t1, t1, t2);
865 
866     tcg_gen_concat_i32_i64(dst, t0, t1);
867 }
868 
869 #ifdef TARGET_SPARC64
870 static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
871                              TCGv_vec src1, TCGv_vec src2)
872 {
873     TCGv_vec a = tcg_temp_new_vec_matching(dst);
874     TCGv_vec c = tcg_temp_new_vec_matching(dst);
875 
876     tcg_gen_add_vec(vece, a, src1, src2);
877     tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
878     /* Vector cmp produces -1 for true, so subtract to add carry. */
879     tcg_gen_sub_vec(vece, dst, a, c);
880 }
881 
882 static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
883                             uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
884 {
885     static const TCGOpcode vecop_list[] = {
886         INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
887     };
888     static const GVecGen3 op = {
889         .fni8 = gen_helper_fchksm16,
890         .fniv = gen_vec_fchksm16,
891         .opt_opc = vecop_list,
892         .vece = MO_16,
893     };
894     tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
895 }
896 
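/*
 * Compute the rounded average (a + b + 1) >> 1 per element without
 * intermediate overflow, using the identity
 *   (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1)
 * which holds for arithmetic (flooring) shifts.
 */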
897 static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
898                             TCGv_vec src1, TCGv_vec src2)
899 {
900     TCGv_vec t = tcg_temp_new_vec_matching(dst);
901 
902     tcg_gen_or_vec(vece, t, src1, src2);
903     tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
904     tcg_gen_sari_vec(vece, src1, src1, 1);
905     tcg_gen_sari_vec(vece, src2, src2, 1);
906     tcg_gen_add_vec(vece, dst, src1, src2);
907     tcg_gen_add_vec(vece, dst, dst, t);
908 }
909 
910 static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
911                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
912 {
913     static const TCGOpcode vecop_list[] = {
914         INDEX_op_add_vec, INDEX_op_sari_vec,
915     };
916     static const GVecGen3 op = {
917         .fni8 = gen_helper_fmean16,
918         .fniv = gen_vec_fmean16,
919         .opt_opc = vecop_list,
920         .vece = MO_16,
921     };
922     tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
923 }
924 #else
925 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
926 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
927 #endif
928 
929 static void finishing_insn(DisasContext *dc)
930 {
931     /*
932      * From here, there is no future path through an unwinding exception.
933      * If the current insn cannot raise an exception, the computation of
934      * cpu_cond may be elided.
935      */
936     if (dc->cpu_cond_live) {
937         tcg_gen_discard_tl(cpu_cond);
938         dc->cpu_cond_live = false;
939     }
940 }
941 
942 static void gen_generic_branch(DisasContext *dc)
943 {
944     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
945     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
946     TCGv c2 = tcg_constant_tl(dc->jump.c2);
947 
948     tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
949 }
950 
951 /* Call this function before using the condition register, as it may
952    have been set for a jump. */
953 static void flush_cond(DisasContext *dc)
954 {
955     if (dc->npc == JUMP_PC) {
956         gen_generic_branch(dc);
957         dc->npc = DYNAMIC_PC_LOOKUP;
958     }
959 }
960 
961 static void save_npc(DisasContext *dc)
962 {
963     if (dc->npc & 3) {
964         switch (dc->npc) {
965         case JUMP_PC:
966             gen_generic_branch(dc);
967             dc->npc = DYNAMIC_PC_LOOKUP;
968             break;
969         case DYNAMIC_PC:
970         case DYNAMIC_PC_LOOKUP:
971             break;
972         default:
973             g_assert_not_reached();
974         }
975     } else {
976         tcg_gen_movi_tl(cpu_npc, dc->npc);
977     }
978 }
979 
980 static void save_state(DisasContext *dc)
981 {
982     tcg_gen_movi_tl(cpu_pc, dc->pc);
983     save_npc(dc);
984 }
985 
986 static void gen_exception(DisasContext *dc, int which)
987 {
988     finishing_insn(dc);
989     save_state(dc);
990     gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
991     dc->base.is_jmp = DISAS_NORETURN;
992 }
993 
994 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
995 {
996     DisasDelayException *e = g_new0(DisasDelayException, 1);
997 
998     e->next = dc->delay_excp_list;
999     dc->delay_excp_list = e;
1000 
1001     e->lab = gen_new_label();
1002     e->excp = excp;
1003     e->pc = dc->pc;
1004     /* Caller must have used flush_cond before branch. */
1005     assert(dc->npc != JUMP_PC);
1006     e->npc = dc->npc;
1007 
1008     return e->lab;
1009 }
1010 
1011 static TCGLabel *delay_exception(DisasContext *dc, int excp)
1012 {
1013     return delay_exceptionv(dc, tcg_constant_i32(excp));
1014 }
1015 
1016 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1017 {
1018     TCGv t = tcg_temp_new();
1019     TCGLabel *lab;
1020 
1021     tcg_gen_andi_tl(t, addr, mask);
1022 
1023     flush_cond(dc);
1024     lab = delay_exception(dc, TT_UNALIGNED);
1025     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1026 }
1027 
1028 static void gen_mov_pc_npc(DisasContext *dc)
1029 {
1030     finishing_insn(dc);
1031 
1032     if (dc->npc & 3) {
1033         switch (dc->npc) {
1034         case JUMP_PC:
1035             gen_generic_branch(dc);
1036             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1037             dc->pc = DYNAMIC_PC_LOOKUP;
1038             break;
1039         case DYNAMIC_PC:
1040         case DYNAMIC_PC_LOOKUP:
1041             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1042             dc->pc = dc->npc;
1043             break;
1044         default:
1045             g_assert_not_reached();
1046         }
1047     } else {
1048         dc->pc = dc->npc;
1049     }
1050 }
1051 
1052 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1053                         DisasContext *dc)
1054 {
1055     TCGv t1;
1056 
1057     cmp->c1 = t1 = tcg_temp_new();
1058     cmp->c2 = 0;
1059 
1060     switch (cond & 7) {
1061     case 0x0: /* never */
1062         cmp->cond = TCG_COND_NEVER;
1063         cmp->c1 = tcg_constant_tl(0);
1064         break;
1065 
1066     case 0x1: /* eq: Z */
1067         cmp->cond = TCG_COND_EQ;
1068         if (TARGET_LONG_BITS == 32 || xcc) {
1069             tcg_gen_mov_tl(t1, cpu_cc_Z);
1070         } else {
1071             tcg_gen_ext32u_tl(t1, cpu_icc_Z);
1072         }
1073         break;
1074 
1075     case 0x2: /* le: Z | (N ^ V) */
1076         /*
1077          * Simplify:
1078          *   cc_Z || (N ^ V) < 0        NE
1079          *   cc_Z && !((N ^ V) < 0)     EQ
1080          *   cc_Z & ~((N ^ V) >> TLB)   EQ
1081          */
1082         cmp->cond = TCG_COND_EQ;
1083         tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1084         tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
1085         tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
1086         if (TARGET_LONG_BITS == 64 && !xcc) {
1087             tcg_gen_ext32u_tl(t1, t1);
1088         }
1089         break;
1090 
1091     case 0x3: /* lt: N ^ V */
1092         cmp->cond = TCG_COND_LT;
1093         tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1094         if (TARGET_LONG_BITS == 64 && !xcc) {
1095             tcg_gen_ext32s_tl(t1, t1);
1096         }
1097         break;
1098 
1099     case 0x4: /* leu: Z | C */
1100         /*
1101          * Simplify:
1102          *   cc_Z == 0 || cc_C != 0     NE
1103          *   cc_Z != 0 && cc_C == 0     EQ
1104          *   cc_Z & (cc_C ? 0 : -1)     EQ
1105          *   cc_Z & (cc_C - 1)          EQ
1106          */
1107         cmp->cond = TCG_COND_EQ;
1108         if (TARGET_LONG_BITS == 32 || xcc) {
1109             tcg_gen_subi_tl(t1, cpu_cc_C, 1);
1110             tcg_gen_and_tl(t1, t1, cpu_cc_Z);
1111         } else {
1112             tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1113             tcg_gen_subi_tl(t1, t1, 1);
1114             tcg_gen_and_tl(t1, t1, cpu_icc_Z);
1115             tcg_gen_ext32u_tl(t1, t1);
1116         }
1117         break;
1118 
1119     case 0x5: /* ltu: C */
1120         cmp->cond = TCG_COND_NE;
1121         if (TARGET_LONG_BITS == 32 || xcc) {
1122             tcg_gen_mov_tl(t1, cpu_cc_C);
1123         } else {
1124             tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1125         }
1126         break;
1127 
1128     case 0x6: /* neg: N */
1129         cmp->cond = TCG_COND_LT;
1130         if (TARGET_LONG_BITS == 32 || xcc) {
1131             tcg_gen_mov_tl(t1, cpu_cc_N);
1132         } else {
1133             tcg_gen_ext32s_tl(t1, cpu_cc_N);
1134         }
1135         break;
1136 
1137     case 0x7: /* vs: V */
1138         cmp->cond = TCG_COND_LT;
1139         if (TARGET_LONG_BITS == 32 || xcc) {
1140             tcg_gen_mov_tl(t1, cpu_cc_V);
1141         } else {
1142             tcg_gen_ext32s_tl(t1, cpu_cc_V);
1143         }
1144         break;
1145     }
1146     if (cond & 8) {
1147         cmp->cond = tcg_invert_cond(cmp->cond);
1148     }
1149 }
1150 
1151 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1152 {
1153     TCGv_i32 fcc = cpu_fcc[cc];
1154     TCGv_i32 c1 = fcc;
1155     int c2 = 0;
1156     TCGCond tcond;
1157 
1158     /*
1159      * FCC values:
1160      * 0 =
1161      * 1 <
1162      * 2 >
1163      * 3 unordered
1164      */
1165     switch (cond & 7) {
1166     case 0x0: /* fbn */
1167         tcond = TCG_COND_NEVER;
1168         break;
1169     case 0x1: /* fbne : !0 */
1170         tcond = TCG_COND_NE;
1171         break;
1172     case 0x2: /* fblg : 1 or 2 */
1173         /* fcc in {1,2} - 1 -> fcc in {0,1} */
1174         c1 = tcg_temp_new_i32();
1175         tcg_gen_addi_i32(c1, fcc, -1);
1176         c2 = 1;
1177         tcond = TCG_COND_LEU;
1178         break;
1179     case 0x3: /* fbul : 1 or 3 */
1180         c1 = tcg_temp_new_i32();
1181         tcg_gen_andi_i32(c1, fcc, 1);
1182         tcond = TCG_COND_NE;
1183         break;
1184     case 0x4: /* fbl  : 1 */
1185         c2 = 1;
1186         tcond = TCG_COND_EQ;
1187         break;
1188     case 0x5: /* fbug : 2 or 3 */
1189         c2 = 2;
1190         tcond = TCG_COND_GEU;
1191         break;
1192     case 0x6: /* fbg  : 2 */
1193         c2 = 2;
1194         tcond = TCG_COND_EQ;
1195         break;
1196     case 0x7: /* fbu  : 3 */
1197         c2 = 3;
1198         tcond = TCG_COND_EQ;
1199         break;
1200     }
1201     if (cond & 8) {
1202         tcond = tcg_invert_cond(tcond);
1203     }
1204 
1205     cmp->cond = tcond;
1206     cmp->c2 = c2;
1207     cmp->c1 = tcg_temp_new();
1208     tcg_gen_extu_i32_tl(cmp->c1, c1);
1209 }
1210 
1211 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1212 {
1213     static const TCGCond cond_reg[4] = {
1214         TCG_COND_NEVER,  /* reserved */
1215         TCG_COND_EQ,
1216         TCG_COND_LE,
1217         TCG_COND_LT,
1218     };
1219     TCGCond tcond;
1220 
1221     if ((cond & 3) == 0) {
1222         return false;
1223     }
1224     tcond = cond_reg[cond & 3];
1225     if (cond & 4) {
1226         tcond = tcg_invert_cond(tcond);
1227     }
1228 
1229     cmp->cond = tcond;
1230     cmp->c1 = tcg_temp_new();
1231     cmp->c2 = 0;
1232     tcg_gen_mov_tl(cmp->c1, r_src);
1233     return true;
1234 }
1235 
1236 static void gen_op_clear_ieee_excp_and_FTT(void)
1237 {
1238     tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
1239                    offsetof(CPUSPARCState, fsr_cexc_ftt));
1240 }
1241 
1242 static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
1243 {
1244     gen_op_clear_ieee_excp_and_FTT();
1245     tcg_gen_mov_i32(dst, src);
1246 }
1247 
1248 static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
1249 {
1250     gen_op_clear_ieee_excp_and_FTT();
1251     tcg_gen_xori_i32(dst, src, 1u << 31);
1252 }
1253 
1254 static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
1255 {
1256     gen_op_clear_ieee_excp_and_FTT();
1257     tcg_gen_andi_i32(dst, src, ~(1u << 31));
1258 }
1259 
1260 static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
1261 {
1262     gen_op_clear_ieee_excp_and_FTT();
1263     tcg_gen_mov_i64(dst, src);
1264 }
1265 
1266 static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
1267 {
1268     gen_op_clear_ieee_excp_and_FTT();
1269     tcg_gen_xori_i64(dst, src, 1ull << 63);
1270 }
1271 
1272 static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
1273 {
1274     gen_op_clear_ieee_excp_and_FTT();
1275     tcg_gen_andi_i64(dst, src, ~(1ull << 63));
1276 }
1277 
1278 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1279 {
1280     TCGv_i64 l = tcg_temp_new_i64();
1281     TCGv_i64 h = tcg_temp_new_i64();
1282 
1283     tcg_gen_extr_i128_i64(l, h, src);
1284     tcg_gen_xori_i64(h, h, 1ull << 63);
1285     tcg_gen_concat_i64_i128(dst, l, h);
1286 }
1287 
1288 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1289 {
1290     TCGv_i64 l = tcg_temp_new_i64();
1291     TCGv_i64 h = tcg_temp_new_i64();
1292 
1293     tcg_gen_extr_i128_i64(l, h, src);
1294     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1295     tcg_gen_concat_i64_i128(dst, l, h);
1296 }
1297 
1298 static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1299 {
1300     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
1301 }
1302 
1303 static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1304 {
1305     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
1306 }
1307 
1308 static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1309 {
1310     int op = float_muladd_negate_c;
1311     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1312 }
1313 
1314 static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1315 {
1316     int op = float_muladd_negate_c;
1317     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1318 }
1319 
1320 static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1321 {
1322     int op = float_muladd_negate_c | float_muladd_negate_result;
1323     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1324 }
1325 
1326 static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1327 {
1328     int op = float_muladd_negate_c | float_muladd_negate_result;
1329     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1330 }
1331 
1332 static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1333 {
1334     int op = float_muladd_negate_result;
1335     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1336 }
1337 
1338 static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1339 {
1340     int op = float_muladd_negate_result;
1341     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1342 }
1343 
1344 /* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
1345 static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1346 {
1347     TCGv_i32 one = tcg_constant_i32(float32_one);
1348     int op = float_muladd_halve_result;
1349     gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1350 }
1351 
1352 static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1353 {
1354     TCGv_i64 one = tcg_constant_i64(float64_one);
1355     int op = float_muladd_halve_result;
1356     gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1357 }
1358 
1359 /* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
1360 static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1361 {
1362     TCGv_i32 one = tcg_constant_i32(float32_one);
1363     int op = float_muladd_negate_c | float_muladd_halve_result;
1364     gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1365 }
1366 
1367 static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1368 {
1369     TCGv_i64 one = tcg_constant_i64(float64_one);
1370     int op = float_muladd_negate_c | float_muladd_halve_result;
1371     gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1372 }
1373 
1374 /* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
1375 static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1376 {
1377     TCGv_i32 one = tcg_constant_i32(float32_one);
1378     int op = float_muladd_negate_result | float_muladd_halve_result;
1379     gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1380 }
1381 
1382 static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1383 {
1384     TCGv_i64 one = tcg_constant_i64(float64_one);
1385     int op = float_muladd_negate_result | float_muladd_halve_result;
1386     gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1387 }
1388 
1389 static void gen_op_fpexception_im(DisasContext *dc, int ftt)
1390 {
1391     /*
1392      * CEXC is only set when successfully completing an FPop,
1393      * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
1394      * Thus we can simply store FTT into this field.
1395      */
1396     tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
1397                    offsetof(CPUSPARCState, fsr_cexc_ftt));
1398     gen_exception(dc, TT_FP_EXCP);
1399 }
1400 
1401 static int gen_trap_ifnofpu(DisasContext *dc)
1402 {
1403 #if !defined(CONFIG_USER_ONLY)
1404     if (!dc->fpu_enabled) {
1405         gen_exception(dc, TT_NFPU_INSN);
1406         return 1;
1407     }
1408 #endif
1409     return 0;
1410 }
1411 
1412 /* asi moves */
1413 typedef enum {
1414     GET_ASI_HELPER,
1415     GET_ASI_EXCP,
1416     GET_ASI_DIRECT,
1417     GET_ASI_DTWINX,
1418     GET_ASI_CODE,
1419     GET_ASI_BLOCK,
1420     GET_ASI_SHORT,
1421     GET_ASI_BCOPY,
1422     GET_ASI_BFILL,
1423 } ASIType;
1424 
1425 typedef struct {
1426     ASIType type;
1427     int asi;
1428     int mem_idx;
1429     MemOp memop;
1430 } DisasASI;
1431 
1432 /*
1433  * Build DisasASI.
1434  * For asi == -1, treat as non-asi.
1435  * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1436  */
1437 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1438 {
1439     ASIType type = GET_ASI_HELPER;
1440     int mem_idx = dc->mem_idx;
1441 
1442     if (asi == -1) {
1443         /* Artificial "non-asi" case. */
1444         type = GET_ASI_DIRECT;
1445         goto done;
1446     }
1447 
1448 #ifndef TARGET_SPARC64
1449     /* Before v9, all asis are immediate and privileged.  */
1450     if (asi < 0) {
1451         gen_exception(dc, TT_ILL_INSN);
1452         type = GET_ASI_EXCP;
1453     } else if (supervisor(dc)
1454                /* Note that LEON accepts ASI_USERDATA in user mode, for
1455                   use with CASA.  Also note that previous versions of
1456                   QEMU allowed (and old versions of gcc emitted) ASI_P
1457                   for LEON, which is incorrect.  */
1458                || (asi == ASI_USERDATA
1459                    && (dc->def->features & CPU_FEATURE_CASA))) {
1460         switch (asi) {
1461         case ASI_USERDATA:    /* User data access */
1462             mem_idx = MMU_USER_IDX;
1463             type = GET_ASI_DIRECT;
1464             break;
1465         case ASI_KERNELDATA:  /* Supervisor data access */
1466             mem_idx = MMU_KERNEL_IDX;
1467             type = GET_ASI_DIRECT;
1468             break;
1469         case ASI_USERTXT:     /* User text access */
1470             mem_idx = MMU_USER_IDX;
1471             type = GET_ASI_CODE;
1472             break;
1473         case ASI_KERNELTXT:   /* Supervisor text access */
1474             mem_idx = MMU_KERNEL_IDX;
1475             type = GET_ASI_CODE;
1476             break;
1477         case ASI_M_BYPASS:    /* MMU passthrough */
1478         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1479             mem_idx = MMU_PHYS_IDX;
1480             type = GET_ASI_DIRECT;
1481             break;
1482         case ASI_M_BCOPY: /* Block copy, sta access */
1483             mem_idx = MMU_KERNEL_IDX;
1484             type = GET_ASI_BCOPY;
1485             break;
1486         case ASI_M_BFILL: /* Block fill, stda access */
1487             mem_idx = MMU_KERNEL_IDX;
1488             type = GET_ASI_BFILL;
1489             break;
1490         }
1491 
1492         /* MMU_PHYS_IDX is used when the MMU is disabled, to bypass the
1493          * permissions check in get_physical_address().
1494          */
1495         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1496     } else {
1497         gen_exception(dc, TT_PRIV_INSN);
1498         type = GET_ASI_EXCP;
1499     }
1500 #else
1501     if (asi < 0) {
1502         asi = dc->asi;
1503     }
1504     /* With v9, all asis below 0x80 are privileged.  */
1505     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1506        down that bit into DisasContext.  For the moment that's ok,
1507        since the direct implementations below don't have any ASIs
1508        in the restricted [0x30, 0x7f] range, and the check will be
1509        done properly in the helper.  */
1510     if (!supervisor(dc) && asi < 0x80) {
1511         gen_exception(dc, TT_PRIV_ACT);
1512         type = GET_ASI_EXCP;
1513     } else {
1514         switch (asi) {
1515         case ASI_REAL:      /* Bypass */
1516         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1517         case ASI_REAL_L:    /* Bypass LE */
1518         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1519         case ASI_TWINX_REAL:   /* Real address, twinx */
1520         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1521         case ASI_QUAD_LDD_PHYS:
1522         case ASI_QUAD_LDD_PHYS_L:
1523             mem_idx = MMU_PHYS_IDX;
1524             break;
1525         case ASI_N:  /* Nucleus */
1526         case ASI_NL: /* Nucleus LE */
1527         case ASI_TWINX_N:
1528         case ASI_TWINX_NL:
1529         case ASI_NUCLEUS_QUAD_LDD:
1530         case ASI_NUCLEUS_QUAD_LDD_L:
1531             if (hypervisor(dc)) {
1532                 mem_idx = MMU_PHYS_IDX;
1533             } else {
1534                 mem_idx = MMU_NUCLEUS_IDX;
1535             }
1536             break;
1537         case ASI_AIUP:  /* As if user primary */
1538         case ASI_AIUPL: /* As if user primary LE */
1539         case ASI_TWINX_AIUP:
1540         case ASI_TWINX_AIUP_L:
1541         case ASI_BLK_AIUP_4V:
1542         case ASI_BLK_AIUP_L_4V:
1543         case ASI_BLK_AIUP:
1544         case ASI_BLK_AIUPL:
1545             mem_idx = MMU_USER_IDX;
1546             break;
1547         case ASI_AIUS:  /* As if user secondary */
1548         case ASI_AIUSL: /* As if user secondary LE */
1549         case ASI_TWINX_AIUS:
1550         case ASI_TWINX_AIUS_L:
1551         case ASI_BLK_AIUS_4V:
1552         case ASI_BLK_AIUS_L_4V:
1553         case ASI_BLK_AIUS:
1554         case ASI_BLK_AIUSL:
1555             mem_idx = MMU_USER_SECONDARY_IDX;
1556             break;
1557         case ASI_S:  /* Secondary */
1558         case ASI_SL: /* Secondary LE */
1559         case ASI_TWINX_S:
1560         case ASI_TWINX_SL:
1561         case ASI_BLK_COMMIT_S:
1562         case ASI_BLK_S:
1563         case ASI_BLK_SL:
1564         case ASI_FL8_S:
1565         case ASI_FL8_SL:
1566         case ASI_FL16_S:
1567         case ASI_FL16_SL:
1568             if (mem_idx == MMU_USER_IDX) {
1569                 mem_idx = MMU_USER_SECONDARY_IDX;
1570             } else if (mem_idx == MMU_KERNEL_IDX) {
1571                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1572             }
1573             break;
1574         case ASI_P:  /* Primary */
1575         case ASI_PL: /* Primary LE */
1576         case ASI_TWINX_P:
1577         case ASI_TWINX_PL:
1578         case ASI_BLK_COMMIT_P:
1579         case ASI_BLK_P:
1580         case ASI_BLK_PL:
1581         case ASI_FL8_P:
1582         case ASI_FL8_PL:
1583         case ASI_FL16_P:
1584         case ASI_FL16_PL:
1585             break;
1586         }
1587         switch (asi) {
1588         case ASI_REAL:
1589         case ASI_REAL_IO:
1590         case ASI_REAL_L:
1591         case ASI_REAL_IO_L:
1592         case ASI_N:
1593         case ASI_NL:
1594         case ASI_AIUP:
1595         case ASI_AIUPL:
1596         case ASI_AIUS:
1597         case ASI_AIUSL:
1598         case ASI_S:
1599         case ASI_SL:
1600         case ASI_P:
1601         case ASI_PL:
1602             type = GET_ASI_DIRECT;
1603             break;
1604         case ASI_TWINX_REAL:
1605         case ASI_TWINX_REAL_L:
1606         case ASI_TWINX_N:
1607         case ASI_TWINX_NL:
1608         case ASI_TWINX_AIUP:
1609         case ASI_TWINX_AIUP_L:
1610         case ASI_TWINX_AIUS:
1611         case ASI_TWINX_AIUS_L:
1612         case ASI_TWINX_P:
1613         case ASI_TWINX_PL:
1614         case ASI_TWINX_S:
1615         case ASI_TWINX_SL:
1616         case ASI_QUAD_LDD_PHYS:
1617         case ASI_QUAD_LDD_PHYS_L:
1618         case ASI_NUCLEUS_QUAD_LDD:
1619         case ASI_NUCLEUS_QUAD_LDD_L:
1620             type = GET_ASI_DTWINX;
1621             break;
1622         case ASI_BLK_COMMIT_P:
1623         case ASI_BLK_COMMIT_S:
1624         case ASI_BLK_AIUP_4V:
1625         case ASI_BLK_AIUP_L_4V:
1626         case ASI_BLK_AIUP:
1627         case ASI_BLK_AIUPL:
1628         case ASI_BLK_AIUS_4V:
1629         case ASI_BLK_AIUS_L_4V:
1630         case ASI_BLK_AIUS:
1631         case ASI_BLK_AIUSL:
1632         case ASI_BLK_S:
1633         case ASI_BLK_SL:
1634         case ASI_BLK_P:
1635         case ASI_BLK_PL:
1636             type = GET_ASI_BLOCK;
1637             break;
1638         case ASI_FL8_S:
1639         case ASI_FL8_SL:
1640         case ASI_FL8_P:
1641         case ASI_FL8_PL:
1642             memop = MO_UB;
1643             type = GET_ASI_SHORT;
1644             break;
1645         case ASI_FL16_S:
1646         case ASI_FL16_SL:
1647         case ASI_FL16_P:
1648         case ASI_FL16_PL:
1649             memop = MO_TEUW;
1650             type = GET_ASI_SHORT;
1651             break;
1652         }
1653         /* The little-endian asis all have bit 3 set.  */
1654         if (asi & 8) {
1655             memop ^= MO_BSWAP;
1656         }
1657     }
1658 #endif
1659 
1660  done:
1661     return (DisasASI){ type, asi, mem_idx, memop };
1662 }
1663 
1664 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1665 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1666                               TCGv_i32 asi, TCGv_i32 mop)
1667 {
1668     g_assert_not_reached();
1669 }
1670 
1671 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1672                               TCGv_i32 asi, TCGv_i32 mop)
1673 {
1674     g_assert_not_reached();
1675 }
1676 #endif
1677 
1678 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1679 {
1680     switch (da->type) {
1681     case GET_ASI_EXCP:
1682         break;
1683     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1684         gen_exception(dc, TT_ILL_INSN);
1685         break;
1686     case GET_ASI_DIRECT:
1687         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1688         break;
1689 
1690     case GET_ASI_CODE:
1691 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1692         {
1693             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1694             TCGv_i64 t64 = tcg_temp_new_i64();
1695 
1696             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1697             tcg_gen_trunc_i64_tl(dst, t64);
1698         }
1699         break;
1700 #else
1701         g_assert_not_reached();
1702 #endif
1703 
1704     default:
1705         {
1706             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1707             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1708 
1709             save_state(dc);
1710 #ifdef TARGET_SPARC64
1711             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1712 #else
1713             {
1714                 TCGv_i64 t64 = tcg_temp_new_i64();
1715                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1716                 tcg_gen_trunc_i64_tl(dst, t64);
1717             }
1718 #endif
1719         }
1720         break;
1721     }
1722 }
1723 
1724 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1725 {
1726     switch (da->type) {
1727     case GET_ASI_EXCP:
1728         break;
1729 
1730     case GET_ASI_DTWINX: /* Reserved for stda.  */
1731         if (TARGET_LONG_BITS == 32) {
1732             gen_exception(dc, TT_ILL_INSN);
1733             break;
1734         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1735             /* Pre-OpenSPARC CPUs don't have these */
1736             gen_exception(dc, TT_ILL_INSN);
1737             break;
1738         }
1739         /* On OpenSPARC T1+ CPUs, TWINX ASIs used in stores are ST_BLKINIT_ ASIs */
1740         /* fall through */
1741 
1742     case GET_ASI_DIRECT:
1743         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1744         break;
1745 
1746     case GET_ASI_BCOPY:
1747         assert(TARGET_LONG_BITS == 32);
1748         /*
1749          * Copy 32 bytes from the address in SRC to ADDR.
1750          *
1751          * From Ross RT625 hyperSPARC manual, section 4.6:
1752          * "Block Copy and Block Fill will work only on cache line boundaries."
1753          *
1754          * It does not specify whether an unaligned address is truncated
1755          * or trapped.  Previous qemu behaviour was to truncate to 4-byte
1756          * alignment, which is obviously wrong.  The only user I can find is
1757          * the Linux kernel, which starts page-aligned and advances by 32,
1758          * so is always aligned.  Assume truncation as the simpler option.
1759          *
1760          * Since the loads and stores are paired, allow the copy to happen
1761          * in the host endianness.  The copy need not be atomic.
1762          */
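        /*
         * Sketch of the access pattern (illustrative): the two 16-byte
         * loads/stores below cover the 32-byte line, and
         * MO_ATOM_IFALIGN_PAIR allows each 16-byte access to be split
         * into a pair of 8-byte halves, so no 16-byte atomicity is
         * demanded of the host.
         */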
1763         {
1764             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1765             TCGv saddr = tcg_temp_new();
1766             TCGv daddr = tcg_temp_new();
1767             TCGv_i128 tmp = tcg_temp_new_i128();
1768 
1769             tcg_gen_andi_tl(saddr, src, -32);
1770             tcg_gen_andi_tl(daddr, addr, -32);
1771             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1772             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1773             tcg_gen_addi_tl(saddr, saddr, 16);
1774             tcg_gen_addi_tl(daddr, daddr, 16);
1775             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1776             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1777         }
1778         break;
1779 
1780     default:
1781         {
1782             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1783             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1784 
1785             save_state(dc);
1786 #ifdef TARGET_SPARC64
1787             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1788 #else
1789             {
1790                 TCGv_i64 t64 = tcg_temp_new_i64();
1791                 tcg_gen_extu_tl_i64(t64, src);
1792                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1793             }
1794 #endif
1795 
1796             /* A write to a TLB register may alter page maps.  End the TB. */
1797             dc->npc = DYNAMIC_PC;
1798         }
1799         break;
1800     }
1801 }
1802 
1803 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1804                          TCGv dst, TCGv src, TCGv addr)
1805 {
1806     switch (da->type) {
1807     case GET_ASI_EXCP:
1808         break;
1809     case GET_ASI_DIRECT:
1810         tcg_gen_atomic_xchg_tl(dst, addr, src,
1811                                da->mem_idx, da->memop | MO_ALIGN);
1812         break;
1813     default:
1814         /* ??? Should be DAE_invalid_asi.  */
1815         gen_exception(dc, TT_DATA_ACCESS);
1816         break;
1817     }
1818 }
1819 
1820 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1821                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1822 {
1823     switch (da->type) {
1824     case GET_ASI_EXCP:
1825         return;
1826     case GET_ASI_DIRECT:
1827         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1828                                   da->mem_idx, da->memop | MO_ALIGN);
1829         break;
1830     default:
1831         /* ??? Should be DAE_invalid_asi.  */
1832         gen_exception(dc, TT_DATA_ACCESS);
1833         break;
1834     }
1835 }
1836 
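/*
 * LDSTUB atomically fetches a byte and leaves 0xff behind, which is why
 * the direct case can be modeled as an atomic xchg with the constant
 * 0xff rather than needing a dedicated primitive.
 */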
1837 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1838 {
1839     switch (da->type) {
1840     case GET_ASI_EXCP:
1841         break;
1842     case GET_ASI_DIRECT:
1843         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1844                                da->mem_idx, MO_UB);
1845         break;
1846     default:
1847         /* ??? In theory, this should raise DAE_invalid_asi.
1848            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1849         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1850             gen_helper_exit_atomic(tcg_env);
1851         } else {
1852             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1853             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1854             TCGv_i64 s64, t64;
1855 
1856             save_state(dc);
1857             t64 = tcg_temp_new_i64();
1858             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1859 
1860             s64 = tcg_constant_i64(0xff);
1861             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1862 
1863             tcg_gen_trunc_i64_tl(dst, t64);
1864 
1865             /* End the TB.  */
1866             dc->npc = DYNAMIC_PC;
1867         }
1868         break;
1869     }
1870 }
1871 
1872 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1873                         TCGv addr, int rd)
1874 {
1875     MemOp memop = da->memop;
1876     MemOp size = memop & MO_SIZE;
1877     TCGv_i32 d32;
1878     TCGv_i64 d64, l64;
1879     TCGv addr_tmp;
1880 
1881     /* TODO: Use 128-bit load/store below. */
1882     if (size == MO_128) {
1883         memop = (memop & ~MO_SIZE) | MO_64;
1884     }
1885 
1886     switch (da->type) {
1887     case GET_ASI_EXCP:
1888         break;
1889 
1890     case GET_ASI_DIRECT:
1891         memop |= MO_ALIGN_4;
1892         switch (size) {
1893         case MO_32:
1894             d32 = tcg_temp_new_i32();
1895             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1896             gen_store_fpr_F(dc, rd, d32);
1897             break;
1898 
1899         case MO_64:
1900             d64 = tcg_temp_new_i64();
1901             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1902             gen_store_fpr_D(dc, rd, d64);
1903             break;
1904 
1905         case MO_128:
1906             d64 = tcg_temp_new_i64();
1907             l64 = tcg_temp_new_i64();
1908             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1909             addr_tmp = tcg_temp_new();
1910             tcg_gen_addi_tl(addr_tmp, addr, 8);
1911             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
1912             gen_store_fpr_D(dc, rd, d64);
1913             gen_store_fpr_D(dc, rd + 2, l64);
1914             break;
1915         default:
1916             g_assert_not_reached();
1917         }
1918         break;
1919 
1920     case GET_ASI_BLOCK:
1921         /* Valid for lddfa on aligned registers only.  */
1922         if (orig_size == MO_64 && (rd & 7) == 0) {
1923             /* The first operation checks required alignment.  */
1924             addr_tmp = tcg_temp_new();
1925             d64 = tcg_temp_new_i64();
1926             for (int i = 0; ; ++i) {
1927                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
1928                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1929                 gen_store_fpr_D(dc, rd + 2 * i, d64);
1930                 if (i == 7) {
1931                     break;
1932                 }
1933                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1934                 addr = addr_tmp;
1935             }
1936         } else {
1937             gen_exception(dc, TT_ILL_INSN);
1938         }
1939         break;
1940 
1941     case GET_ASI_SHORT:
1942         /* Valid for lddfa only.  */
1943         if (orig_size == MO_64) {
1944             d64 = tcg_temp_new_i64();
1945             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
1946             gen_store_fpr_D(dc, rd, d64);
1947         } else {
1948             gen_exception(dc, TT_ILL_INSN);
1949         }
1950         break;
1951 
1952     default:
1953         {
1954             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1955             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1956 
1957             save_state(dc);
1958             /* According to the table in the UA2011 manual, the only
1959                other asis that are valid for ldfa/lddfa/ldqfa are
1960                the NO_FAULT asis.  We still need a helper for these,
1961                but we can just use the integer asi helper for them.  */
1962             switch (size) {
1963             case MO_32:
1964                 d64 = tcg_temp_new_i64();
1965                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1966                 d32 = tcg_temp_new_i32();
1967                 tcg_gen_extrl_i64_i32(d32, d64);
1968                 gen_store_fpr_F(dc, rd, d32);
1969                 break;
1970             case MO_64:
1971                 d64 = tcg_temp_new_i64();
1972                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1973                 gen_store_fpr_D(dc, rd, d64);
1974                 break;
1975             case MO_128:
1976                 d64 = tcg_temp_new_i64();
1977                 l64 = tcg_temp_new_i64();
1978                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1979                 addr_tmp = tcg_temp_new();
1980                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1981                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
1982                 gen_store_fpr_D(dc, rd, d64);
1983                 gen_store_fpr_D(dc, rd + 2, l64);
1984                 break;
1985             default:
1986                 g_assert_not_reached();
1987             }
1988         }
1989         break;
1990     }
1991 }
1992 
1993 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1994                         TCGv addr, int rd)
1995 {
1996     MemOp memop = da->memop;
1997     MemOp size = memop & MO_SIZE;
1998     TCGv_i32 d32;
1999     TCGv_i64 d64;
2000     TCGv addr_tmp;
2001 
2002     /* TODO: Use 128-bit load/store below. */
2003     if (size == MO_128) {
2004         memop = (memop & ~MO_SIZE) | MO_64;
2005     }
2006 
2007     switch (da->type) {
2008     case GET_ASI_EXCP:
2009         break;
2010 
2011     case GET_ASI_DIRECT:
2012         memop |= MO_ALIGN_4;
2013         switch (size) {
2014         case MO_32:
2015             d32 = gen_load_fpr_F(dc, rd);
2016             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2017             break;
2018         case MO_64:
2019             d64 = gen_load_fpr_D(dc, rd);
2020             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2021             break;
2022         case MO_128:
2023             /* Only 4-byte alignment required.  However, it is legal for the
2024                cpu to signal the alignment fault, and the OS trap handler is
2025                required to fix it up.  Requiring 16-byte alignment here avoids
2026                having to probe the second page before performing the first
2027                write.  */
2028             d64 = gen_load_fpr_D(dc, rd);
2029             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2030             addr_tmp = tcg_temp_new();
2031             tcg_gen_addi_tl(addr_tmp, addr, 8);
2032             d64 = gen_load_fpr_D(dc, rd + 2);
2033             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2034             break;
2035         default:
2036             g_assert_not_reached();
2037         }
2038         break;
2039 
2040     case GET_ASI_BLOCK:
2041         /* Valid for stdfa on aligned registers only.  */
2042         if (orig_size == MO_64 && (rd & 7) == 0) {
2043             /* The first operation checks required alignment.  */
2044             addr_tmp = tcg_temp_new();
2045             for (int i = 0; ; ++i) {
2046                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2047                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2048                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2049                 if (i == 7) {
2050                     break;
2051                 }
2052                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2053                 addr = addr_tmp;
2054             }
2055         } else {
2056             gen_exception(dc, TT_ILL_INSN);
2057         }
2058         break;
2059 
2060     case GET_ASI_SHORT:
2061         /* Valid for stdfa only.  */
2062         if (orig_size == MO_64) {
2063             d64 = gen_load_fpr_D(dc, rd);
2064             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2065         } else {
2066             gen_exception(dc, TT_ILL_INSN);
2067         }
2068         break;
2069 
2070     default:
2071         /* According to the table in the UA2011 manual, the only
2072            other asis that are valid for stfa/stdfa/stqfa are
2073            the PST* asis, which aren't currently handled.  */
2074         gen_exception(dc, TT_ILL_INSN);
2075         break;
2076     }
2077 }
2078 
2079 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2080 {
2081     TCGv hi = gen_dest_gpr(dc, rd);
2082     TCGv lo = gen_dest_gpr(dc, rd + 1);
2083 
2084     switch (da->type) {
2085     case GET_ASI_EXCP:
2086         return;
2087 
2088     case GET_ASI_DTWINX:
2089 #ifdef TARGET_SPARC64
2090         {
2091             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2092             TCGv_i128 t = tcg_temp_new_i128();
2093 
2094             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2095             /*
2096              * Note that LE twinx acts as if each 64-bit register result is
2097              * byte swapped.  We perform one 128-bit LE load, so must swap
2098              * the order of the writebacks.
2099              */
2100             if ((mop & MO_BSWAP) == MO_TE) {
2101                 tcg_gen_extr_i128_i64(lo, hi, t);
2102             } else {
2103                 tcg_gen_extr_i128_i64(hi, lo, t);
2104             }
2105         }
2106         break;
2107 #else
2108         g_assert_not_reached();
2109 #endif
2110 
2111     case GET_ASI_DIRECT:
2112         {
2113             TCGv_i64 tmp = tcg_temp_new_i64();
2114 
2115             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2116 
2117             /* Note that LE ldda acts as if each 32-bit register
2118                result is byte swapped.  Having just performed one
2119                64-bit bswap, we now need to swap the writebacks.  */
2120             if ((da->memop & MO_BSWAP) == MO_TE) {
2121                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2122             } else {
2123                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2124             }
2125         }
2126         break;
2127 
2128     case GET_ASI_CODE:
2129 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2130         {
2131             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2132             TCGv_i64 tmp = tcg_temp_new_i64();
2133 
2134             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2135 
2136             /* See above.  */
2137             if ((da->memop & MO_BSWAP) == MO_TE) {
2138                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2139             } else {
2140                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2141             }
2142         }
2143         break;
2144 #else
2145         g_assert_not_reached();
2146 #endif
2147 
2148     default:
2149         /* ??? In theory we've handled all of the ASIs that are valid
2150            for ldda, and this should raise DAE_invalid_asi.  However,
2151            real hardware allows others.  This can be seen with e.g.
2152            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2153         {
2154             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2155             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2156             TCGv_i64 tmp = tcg_temp_new_i64();
2157 
2158             save_state(dc);
2159             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2160 
2161             /* See above.  */
2162             if ((da->memop & MO_BSWAP) == MO_TE) {
2163                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2164             } else {
2165                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2166             }
2167         }
2168         break;
2169     }
2170 
2171     gen_store_gpr(dc, rd, hi);
2172     gen_store_gpr(dc, rd + 1, lo);
2173 }
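
/*
 * Byte-level sketch for the TWINX forms (illustrative values): with
 * bytes 00..0f at a 16-aligned address, the big-endian form yields
 * rd = 0x0001020304050607 and rd+1 = 0x08090a0b0c0d0e0f, while the _L
 * form yields rd = 0x0706050403020100 and rd+1 = 0x0f0e0d0c0b0a0908.
 */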
2174 
2175 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2176 {
2177     TCGv hi = gen_load_gpr(dc, rd);
2178     TCGv lo = gen_load_gpr(dc, rd + 1);
2179 
2180     switch (da->type) {
2181     case GET_ASI_EXCP:
2182         break;
2183 
2184     case GET_ASI_DTWINX:
2185 #ifdef TARGET_SPARC64
2186         {
2187             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2188             TCGv_i128 t = tcg_temp_new_i128();
2189 
2190             /*
2191              * Note that LE twinx acts as if each 64-bit register result is
2192              * byte swapped.  We perform one 128-bit LE store, so must swap
2193              * the order of the construction.
2194              */
2195             if ((mop & MO_BSWAP) == MO_TE) {
2196                 tcg_gen_concat_i64_i128(t, lo, hi);
2197             } else {
2198                 tcg_gen_concat_i64_i128(t, hi, lo);
2199             }
2200             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2201         }
2202         break;
2203 #else
2204         g_assert_not_reached();
2205 #endif
2206 
2207     case GET_ASI_DIRECT:
2208         {
2209             TCGv_i64 t64 = tcg_temp_new_i64();
2210 
2211             /* Note that LE stda acts as if each 32-bit register result is
2212                byte swapped.  We will perform one 64-bit LE store, so now
2213                we must swap the order of the construction.  */
2214             if ((da->memop & MO_BSWAP) == MO_TE) {
2215                 tcg_gen_concat_tl_i64(t64, lo, hi);
2216             } else {
2217                 tcg_gen_concat_tl_i64(t64, hi, lo);
2218             }
2219             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2220         }
2221         break;
2222 
2223     case GET_ASI_BFILL:
2224         assert(TARGET_LONG_BITS == 32);
2225         /*
2226          * Fill the 32-byte line at ADDR with the 8-byte pair [rd:rd+1].
2227          * See the comments for GET_ASI_BCOPY above.
2228          */
2229         {
2230             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2231             TCGv_i64 t8 = tcg_temp_new_i64();
2232             TCGv_i128 t16 = tcg_temp_new_i128();
2233             TCGv daddr = tcg_temp_new();
2234 
2235             tcg_gen_concat_tl_i64(t8, lo, hi);
2236             tcg_gen_concat_i64_i128(t16, t8, t8);
2237             tcg_gen_andi_tl(daddr, addr, -32);
2238             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2239             tcg_gen_addi_tl(daddr, daddr, 16);
2240             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2241         }
2242         break;
2243 
2244     default:
2245         /* ??? In theory we've handled all of the ASIs that are valid
2246            for stda, and this should raise DAE_invalid_asi.  */
2247         {
2248             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2249             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2250             TCGv_i64 t64 = tcg_temp_new_i64();
2251 
2252             /* See above.  */
2253             if ((da->memop & MO_BSWAP) == MO_TE) {
2254                 tcg_gen_concat_tl_i64(t64, lo, hi);
2255             } else {
2256                 tcg_gen_concat_tl_i64(t64, hi, lo);
2257             }
2258 
2259             save_state(dc);
2260             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2261         }
2262         break;
2263     }
2264 }
2265 
2266 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2267 {
2268 #ifdef TARGET_SPARC64
2269     TCGv_i32 c32, zero, dst, s1, s2;
2270     TCGv_i64 c64 = tcg_temp_new_i64();
2271 
2272     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2273        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2274        the latter.  */
2275     c32 = tcg_temp_new_i32();
2276     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2277     tcg_gen_extrl_i64_i32(c32, c64);
2278 
2279     s1 = gen_load_fpr_F(dc, rs);
2280     s2 = gen_load_fpr_F(dc, rd);
2281     dst = tcg_temp_new_i32();
2282     zero = tcg_constant_i32(0);
2283 
2284     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2285 
2286     gen_store_fpr_F(dc, rd, dst);
2287 #else
2288     qemu_build_not_reached();
2289 #endif
2290 }
2291 
2292 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2293 {
2294 #ifdef TARGET_SPARC64
2295     TCGv_i64 dst = tcg_temp_new_i64();
2296     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2297                         gen_load_fpr_D(dc, rs),
2298                         gen_load_fpr_D(dc, rd));
2299     gen_store_fpr_D(dc, rd, dst);
2300 #else
2301     qemu_build_not_reached();
2302 #endif
2303 }
2304 
2305 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2306 {
2307 #ifdef TARGET_SPARC64
2308     TCGv c2 = tcg_constant_tl(cmp->c2);
2309     TCGv_i64 h = tcg_temp_new_i64();
2310     TCGv_i64 l = tcg_temp_new_i64();
2311 
2312     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2313                         gen_load_fpr_D(dc, rs),
2314                         gen_load_fpr_D(dc, rd));
2315     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2316                         gen_load_fpr_D(dc, rs + 2),
2317                         gen_load_fpr_D(dc, rd + 2));
2318     gen_store_fpr_D(dc, rd, h);
2319     gen_store_fpr_D(dc, rd + 2, l);
2320 #else
2321     qemu_build_not_reached();
2322 #endif
2323 }
2324 
2325 #ifdef TARGET_SPARC64
2326 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2327 {
2328     TCGv_i32 r_tl = tcg_temp_new_i32();
2329 
2330     /* load env->tl into r_tl */
2331     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2332 
2333     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 minus 1 */
2334     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2335 
2336     /* calculate offset to current trap state from env->ts, reuse r_tl */
2337     tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2338     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2339 
2340     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2341     {
2342         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2343         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2344         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2345     }
2346 }
2347 #endif
2348 
2349 static int extract_dfpreg(DisasContext *dc, int x)
2350 {
2351     int r = x & 0x1e;
2352 #ifdef TARGET_SPARC64
2353     r |= (x & 1) << 5;
2354 #endif
2355     return r;
2356 }
2357 
2358 static int extract_qfpreg(DisasContext *dc, int x)
2359 {
2360     int r = x & 0x1c;
2361 #ifdef TARGET_SPARC64
2362     r |= (x & 1) << 5;
2363 #endif
2364     return r;
2365 }
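
/*
 * Worked example: on sparc64 the low encoding bit selects the upper
 * register bank, so a double-register encoding of 0x01 names %f32
 * (0 | 1 << 5) and 0x03 names %f34; on sparc32 the same encodings
 * name %f0 and %f2.
 */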
2366 
2367 /* Include the auto-generated decoder.  */
2368 #include "decode-insns.c.inc"
2369 
2370 #define TRANS(NAME, AVAIL, FUNC, ...) \
2371     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2372     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2373 
2374 #define avail_ALL(C)      true
2375 #ifdef TARGET_SPARC64
2376 # define avail_32(C)      false
2377 # define avail_ASR17(C)   false
2378 # define avail_CASA(C)    true
2379 # define avail_DIV(C)     true
2380 # define avail_MUL(C)     true
2381 # define avail_POWERDOWN(C) false
2382 # define avail_64(C)      true
2383 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2384 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2385 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2386 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2387 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2388 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2389 # define avail_VIS3B(C)   avail_VIS3(C)
2390 #else
2391 # define avail_32(C)      true
2392 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2393 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2394 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2395 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2396 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2397 # define avail_64(C)      false
2398 # define avail_FMAF(C)    false
2399 # define avail_GL(C)      false
2400 # define avail_HYPV(C)    false
2401 # define avail_VIS1(C)    false
2402 # define avail_VIS2(C)    false
2403 # define avail_VIS3(C)    false
2404 # define avail_VIS3B(C)   false
2405 #endif
2406 
2407 /* Default case for non-jump instructions. */
2408 static bool advance_pc(DisasContext *dc)
2409 {
2410     TCGLabel *l1;
2411 
2412     finishing_insn(dc);
2413 
2414     if (dc->npc & 3) {
2415         switch (dc->npc) {
2416         case DYNAMIC_PC:
2417         case DYNAMIC_PC_LOOKUP:
2418             dc->pc = dc->npc;
2419             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2420             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2421             break;
2422 
2423         case JUMP_PC:
2424             /* we can do a static jump */
2425             l1 = gen_new_label();
2426             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2427 
2428             /* jump not taken */
2429             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2430 
2431             /* jump taken */
2432             gen_set_label(l1);
2433             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2434 
2435             dc->base.is_jmp = DISAS_NORETURN;
2436             break;
2437 
2438         default:
2439             g_assert_not_reached();
2440         }
2441     } else {
2442         dc->pc = dc->npc;
2443         dc->npc = dc->npc + 4;
2444     }
2445     return true;
2446 }
2447 
2448 /*
2449  * Major opcodes 00 and 01 -- branches, call, and sethi
2450  */
2451 
2452 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2453                               bool annul, int disp)
2454 {
2455     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2456     target_ulong npc;
2457 
2458     finishing_insn(dc);
2459 
2460     if (cmp->cond == TCG_COND_ALWAYS) {
2461         if (annul) {
2462             dc->pc = dest;
2463             dc->npc = dest + 4;
2464         } else {
2465             gen_mov_pc_npc(dc);
2466             dc->npc = dest;
2467         }
2468         return true;
2469     }
2470 
2471     if (cmp->cond == TCG_COND_NEVER) {
2472         npc = dc->npc;
2473         if (npc & 3) {
2474             gen_mov_pc_npc(dc);
2475             if (annul) {
2476                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2477             }
2478             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2479         } else {
2480             dc->pc = npc + (annul ? 4 : 0);
2481             dc->npc = dc->pc + 4;
2482         }
2483         return true;
2484     }
2485 
2486     flush_cond(dc);
2487     npc = dc->npc;
2488 
2489     if (annul) {
2490         TCGLabel *l1 = gen_new_label();
2491 
2492         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2493         gen_goto_tb(dc, 0, npc, dest);
2494         gen_set_label(l1);
2495         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2496 
2497         dc->base.is_jmp = DISAS_NORETURN;
2498     } else {
2499         if (npc & 3) {
2500             switch (npc) {
2501             case DYNAMIC_PC:
2502             case DYNAMIC_PC_LOOKUP:
2503                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2504                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2505                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2506                                    cmp->c1, tcg_constant_tl(cmp->c2),
2507                                    tcg_constant_tl(dest), cpu_npc);
2508                 dc->pc = npc;
2509                 break;
2510             default:
2511                 g_assert_not_reached();
2512             }
2513         } else {
2514             dc->pc = npc;
2515             dc->npc = JUMP_PC;
2516             dc->jump = *cmp;
2517             dc->jump_pc[0] = dest;
2518             dc->jump_pc[1] = npc + 4;
2519 
2520             /* The condition for cpu_cond is always NE -- normalize. */
2521             if (cmp->cond == TCG_COND_NE) {
2522                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2523             } else {
2524                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2525             }
2526             dc->cpu_cond_live = true;
2527         }
2528     }
2529     return true;
2530 }
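
/*
 * Worked example of the JUMP_PC state (illustrative addresses): a
 * conditional branch at 0x1000 targeting 0x2000 leaves the delay slot
 * with pc = 0x1004, npc = JUMP_PC and jump_pc[] = { 0x2000, 0x1008 };
 * advance_pc for the delay-slot insn then resolves to either
 * (0x2000, 0x2004) or (0x1008, 0x100c) via the brcond above.
 */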
2531 
2532 static bool raise_priv(DisasContext *dc)
2533 {
2534     gen_exception(dc, TT_PRIV_INSN);
2535     return true;
2536 }
2537 
2538 static bool raise_unimpfpop(DisasContext *dc)
2539 {
2540     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2541     return true;
2542 }
2543 
2544 static bool gen_trap_float128(DisasContext *dc)
2545 {
2546     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2547         return false;
2548     }
2549     return raise_unimpfpop(dc);
2550 }
2551 
2552 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2553 {
2554     DisasCompare cmp;
2555 
2556     gen_compare(&cmp, a->cc, a->cond, dc);
2557     return advance_jump_cond(dc, &cmp, a->a, a->i);
2558 }
2559 
2560 TRANS(Bicc, ALL, do_bpcc, a)
2561 TRANS(BPcc,  64, do_bpcc, a)
2562 
2563 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2564 {
2565     DisasCompare cmp;
2566 
2567     if (gen_trap_ifnofpu(dc)) {
2568         return true;
2569     }
2570     gen_fcompare(&cmp, a->cc, a->cond);
2571     return advance_jump_cond(dc, &cmp, a->a, a->i);
2572 }
2573 
2574 TRANS(FBPfcc,  64, do_fbpfcc, a)
2575 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2576 
2577 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2578 {
2579     DisasCompare cmp;
2580 
2581     if (!avail_64(dc)) {
2582         return false;
2583     }
2584     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2585         return false;
2586     }
2587     return advance_jump_cond(dc, &cmp, a->a, a->i);
2588 }
2589 
2590 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2591 {
2592     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2593 
2594     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2595     gen_mov_pc_npc(dc);
2596     dc->npc = target;
2597     return true;
2598 }
2599 
2600 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2601 {
2602     /*
2603      * For sparc32, always generate the no-coprocessor exception.
2604      * For sparc64, always generate an illegal instruction exception.
2605      */
2606 #ifdef TARGET_SPARC64
2607     return false;
2608 #else
2609     gen_exception(dc, TT_NCP_INSN);
2610     return true;
2611 #endif
2612 }
2613 
2614 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2615 {
2616     /* Special-case %g0 because that's the canonical nop.  */
2617     if (a->rd) {
2618         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2619     }
2620     return advance_pc(dc);
2621 }
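
/*
 * E.g. "sethi %hi(0xdeadbeef), %o0" has imm22 = 0xdeadbeef >> 10 and
 * so writes 0xdeadbc00, leaving the low ten bits for a following
 * "or %o0, %lo(0xdeadbeef), %o0".
 */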
2622 
2623 /*
2624  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2625  */
2626 
2627 static bool do_tcc(DisasContext *dc, int cond, int cc,
2628                    int rs1, bool imm, int rs2_or_imm)
2629 {
2630     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2631                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2632     DisasCompare cmp;
2633     TCGLabel *lab;
2634     TCGv_i32 trap;
2635 
2636     /* Trap never.  */
2637     if (cond == 0) {
2638         return advance_pc(dc);
2639     }
2640 
2641     /*
2642      * Immediate traps are the most common case.  Since this value is
2643      * live across the branch, it really pays to evaluate the constant.
2644      */
2645     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2646         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2647     } else {
2648         trap = tcg_temp_new_i32();
2649         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2650         if (imm) {
2651             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2652         } else {
2653             TCGv_i32 t2 = tcg_temp_new_i32();
2654             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2655             tcg_gen_add_i32(trap, trap, t2);
2656         }
2657         tcg_gen_andi_i32(trap, trap, mask);
2658         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2659     }
2660 
2661     finishing_insn(dc);
2662 
2663     /* Trap always.  */
2664     if (cond == 8) {
2665         save_state(dc);
2666         gen_helper_raise_exception(tcg_env, trap);
2667         dc->base.is_jmp = DISAS_NORETURN;
2668         return true;
2669     }
2670 
2671     /* Conditional trap.  */
2672     flush_cond(dc);
2673     lab = delay_exceptionv(dc, trap);
2674     gen_compare(&cmp, cc, cond, dc);
2675     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2676 
2677     return advance_pc(dc);
2678 }
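
/*
 * Worked example (assuming V8_TRAP_MASK == 0x7f): "ta 0x10" with %g0
 * as rs1 folds to the constant trap number TT_TRAP + 0x10 at translate
 * time; only register-relative trap numbers need the runtime masking
 * arithmetic above.
 */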
2679 
2680 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2681 {
2682     if (avail_32(dc) && a->cc) {
2683         return false;
2684     }
2685     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2686 }
2687 
2688 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2689 {
2690     if (avail_64(dc)) {
2691         return false;
2692     }
2693     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2694 }
2695 
2696 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2697 {
2698     if (avail_32(dc)) {
2699         return false;
2700     }
2701     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2702 }
2703 
2704 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2705 {
2706     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2707     return advance_pc(dc);
2708 }
2709 
2710 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2711 {
2712     if (avail_32(dc)) {
2713         return false;
2714     }
2715     if (a->mmask) {
2716         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2717         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2718     }
2719     if (a->cmask) {
2720         /* For #Sync, etc, end the TB to recognize interrupts. */
2721         dc->base.is_jmp = DISAS_EXIT;
2722     }
2723     return advance_pc(dc);
2724 }
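
/*
 * For reference (assuming the usual encodings): the SPARC mmask bits
 * are #LoadLoad = 1, #StoreLoad = 2, #LoadStore = 4, #StoreStore = 8,
 * lining up with TCG_MO_LD_LD/ST_LD/LD_ST/ST_ST, so mmask passes
 * through to tcg_gen_mb unchanged.
 */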
2725 
2726 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2727                           TCGv (*func)(DisasContext *, TCGv))
2728 {
2729     if (!priv) {
2730         return raise_priv(dc);
2731     }
2732     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2733     return advance_pc(dc);
2734 }
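
/*
 * Each reader below either fills DST and returns it, or returns a live
 * global TCGv directly (e.g. do_rdy returns cpu_y); gen_store_gpr
 * copies from whichever TCGv comes back, so both styles are
 * interchangeable.
 */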
2735 
2736 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2737 {
2738     return cpu_y;
2739 }
2740 
2741 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2742 {
2743     /*
2744      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2745      * This pattern matches after all other ASRs, so Leon3 %asr17 is handled first.
2746      * This matches after all other ASR, so Leon3 Asr17 is handled first.
2747      */
2748     if (avail_64(dc) && a->rs1 != 0) {
2749         return false;
2750     }
2751     return do_rd_special(dc, true, a->rd, do_rdy);
2752 }
2753 
2754 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2755 {
2756     gen_helper_rdasr17(dst, tcg_env);
2757     return dst;
2758 }
2759 
2760 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2761 
2762 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2763 {
2764     gen_helper_rdccr(dst, tcg_env);
2765     return dst;
2766 }
2767 
2768 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2769 
2770 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2771 {
2772 #ifdef TARGET_SPARC64
2773     return tcg_constant_tl(dc->asi);
2774 #else
2775     qemu_build_not_reached();
2776 #endif
2777 }
2778 
2779 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2780 
2781 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2782 {
2783     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2784 
2785     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2786     if (translator_io_start(&dc->base)) {
2787         dc->base.is_jmp = DISAS_EXIT;
2788     }
2789     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2790                               tcg_constant_i32(dc->mem_idx));
2791     return dst;
2792 }
2793 
2794 /* TODO: non-priv access only allowed when enabled. */
2795 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2796 
2797 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2798 {
2799     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2800 }
2801 
2802 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2803 
2804 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2805 {
2806     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2807     return dst;
2808 }
2809 
2810 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2811 
2812 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2813 {
2814     gen_trap_ifnofpu(dc);
2815     return cpu_gsr;
2816 }
2817 
2818 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2819 
2820 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2821 {
2822     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2823     return dst;
2824 }
2825 
2826 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2827 
2828 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2829 {
2830     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2831     return dst;
2832 }
2833 
2834 /* TODO: non-priv access only allowed when enabled. */
2835 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2836 
2837 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2838 {
2839     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2840 
2841     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2842     if (translator_io_start(&dc->base)) {
2843         dc->base.is_jmp = DISAS_EXIT;
2844     }
2845     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2846                               tcg_constant_i32(dc->mem_idx));
2847     return dst;
2848 }
2849 
2850 /* TODO: non-priv access only allowed when enabled. */
2851 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2852 
2853 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2854 {
2855     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2856     return dst;
2857 }
2858 
2859 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2860 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2861 
2862 /*
2863  * UltraSPARC-T1 Strand status.
2864  * The HYPV check may not be sufficient: UA2005 & UA2007 describe
2865  * this ASR as implementation dependent.
2866  */
2867 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2868 {
2869     return tcg_constant_tl(1);
2870 }
2871 
2872 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2873 
2874 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2875 {
2876     gen_helper_rdpsr(dst, tcg_env);
2877     return dst;
2878 }
2879 
2880 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2881 
2882 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2883 {
2884     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2885     return dst;
2886 }
2887 
2888 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2889 
2890 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2891 {
2892     TCGv_i32 tl = tcg_temp_new_i32();
2893     TCGv_ptr tp = tcg_temp_new_ptr();
2894 
2895     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2896     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2897     tcg_gen_shli_i32(tl, tl, 3);
2898     tcg_gen_ext_i32_ptr(tp, tl);
2899     tcg_gen_add_ptr(tp, tp, tcg_env);
2900 
2901     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2902     return dst;
2903 }
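
/*
 * The shift by 3 scales the trap level into a byte offset, on the
 * assumption that each htstate[] entry is a single 8-byte word.
 */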
2904 
2905 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2906 
2907 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2908 {
2909     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2910     return dst;
2911 }
2912 
2913 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2914 
2915 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2916 {
2917     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2918     return dst;
2919 }
2920 
2921 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2922 
2923 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2924 {
2925     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2926     return dst;
2927 }
2928 
2929 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2930 
2931 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2932 {
2933     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2934     return dst;
2935 }
2936 
2937 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2938       do_rdhstick_cmpr)
2939 
2940 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2941 {
2942     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2943     return dst;
2944 }
2945 
2946 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2947 
2948 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2949 {
2950 #ifdef TARGET_SPARC64
2951     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2952 
2953     gen_load_trap_state_at_tl(r_tsptr);
2954     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2955     return dst;
2956 #else
2957     qemu_build_not_reached();
2958 #endif
2959 }
2960 
2961 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2962 
2963 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2964 {
2965 #ifdef TARGET_SPARC64
2966     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2967 
2968     gen_load_trap_state_at_tl(r_tsptr);
2969     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2970     return dst;
2971 #else
2972     qemu_build_not_reached();
2973 #endif
2974 }
2975 
2976 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2977 
2978 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2979 {
2980 #ifdef TARGET_SPARC64
2981     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2982 
2983     gen_load_trap_state_at_tl(r_tsptr);
2984     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2985     return dst;
2986 #else
2987     qemu_build_not_reached();
2988 #endif
2989 }
2990 
2991 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2992 
2993 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2994 {
2995 #ifdef TARGET_SPARC64
2996     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2997 
2998     gen_load_trap_state_at_tl(r_tsptr);
2999     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3000     return dst;
3001 #else
3002     qemu_build_not_reached();
3003 #endif
3004 }
3005 
3006 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3007 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3008 
3009 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3010 {
3011     return cpu_tbr;
3012 }
3013 
3014 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3015 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3016 
3017 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3018 {
3019     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3020     return dst;
3021 }
3022 
3023 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3024 
3025 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3026 {
3027     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3028     return dst;
3029 }
3030 
3031 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3032 
3033 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3034 {
3035     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3036     return dst;
3037 }
3038 
3039 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3040 
3041 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3042 {
3043     gen_helper_rdcwp(dst, tcg_env);
3044     return dst;
3045 }
3046 
3047 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3048 
3049 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3050 {
3051     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3052     return dst;
3053 }
3054 
3055 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3056 
3057 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3058 {
3059     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3060     return dst;
3061 }
3062 
3063 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3064       do_rdcanrestore)
3065 
3066 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3067 {
3068     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3069     return dst;
3070 }
3071 
3072 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3073 
3074 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3075 {
3076     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3077     return dst;
3078 }
3079 
3080 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3081 
3082 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3083 {
3084     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3085     return dst;
3086 }
3087 
3088 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3089 
3090 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3091 {
3092     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3093     return dst;
3094 }
3095 
3096 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3097 
3098 /* UA2005 strand status */
3099 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3100 {
3101     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3102     return dst;
3103 }
3104 
3105 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3106 
3107 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3108 {
3109     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3110     return dst;
3111 }
3112 
3113 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3114 
3115 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3116 {
3117     if (avail_64(dc)) {
3118         gen_helper_flushw(tcg_env);
3119         return advance_pc(dc);
3120     }
3121     return false;
3122 }
3123 
3124 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3125                           void (*func)(DisasContext *, TCGv))
3126 {
3127     TCGv src;
3128 
3129     /* For simplicity, we under-decoded the rs2 form. */
3130     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3131         return false;
3132     }
3133     if (!priv) {
3134         return raise_priv(dc);
3135     }
3136 
3137     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3138         src = tcg_constant_tl(a->rs2_or_imm);
3139     } else {
3140         TCGv src1 = gen_load_gpr(dc, a->rs1);
3141         if (a->rs2_or_imm == 0) {
3142             src = src1;
3143         } else {
3144             src = tcg_temp_new();
3145             if (a->imm) {
3146                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3147             } else {
3148                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3149             }
3150         }
3151     }
3152     func(dc, src);
3153     return advance_pc(dc);
3154 }
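
/*
 * Note the XOR above is architectural, not an optimisation: the WRASR
 * family writes "r[rs1] xor (r[rs2] or simm13)", so e.g.
 * "wr %g0, 7, %y" writes 7 while "wr %o0, %o1, %y" writes %o0 ^ %o1.
 */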
3155 
3156 static void do_wry(DisasContext *dc, TCGv src)
3157 {
3158     tcg_gen_ext32u_tl(cpu_y, src);
3159 }
3160 
3161 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3162 
3163 static void do_wrccr(DisasContext *dc, TCGv src)
3164 {
3165     gen_helper_wrccr(tcg_env, src);
3166 }
3167 
3168 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3169 
3170 static void do_wrasi(DisasContext *dc, TCGv src)
3171 {
3172     TCGv tmp = tcg_temp_new();
3173 
3174     tcg_gen_ext8u_tl(tmp, src);
3175     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3176     /* End TB to notice changed ASI. */
3177     dc->base.is_jmp = DISAS_EXIT;
3178 }
3179 
3180 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3181 
3182 static void do_wrfprs(DisasContext *dc, TCGv src)
3183 {
3184 #ifdef TARGET_SPARC64
3185     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3186     dc->fprs_dirty = 0;
3187     dc->base.is_jmp = DISAS_EXIT;
3188 #else
3189     qemu_build_not_reached();
3190 #endif
3191 }
3192 
3193 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3194 
3195 static void do_wrgsr(DisasContext *dc, TCGv src)
3196 {
3197     gen_trap_ifnofpu(dc);
3198     tcg_gen_mov_tl(cpu_gsr, src);
3199 }
3200 
3201 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3202 
3203 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3204 {
3205     gen_helper_set_softint(tcg_env, src);
3206 }
3207 
3208 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3209 
3210 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3211 {
3212     gen_helper_clear_softint(tcg_env, src);
3213 }
3214 
3215 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3216 
3217 static void do_wrsoftint(DisasContext *dc, TCGv src)
3218 {
3219     gen_helper_write_softint(tcg_env, src);
3220 }
3221 
3222 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3223 
3224 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3225 {
3226     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3227 
3228     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3229     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3230     translator_io_start(&dc->base);
3231     gen_helper_tick_set_limit(r_tickptr, src);
3232     /* End TB to handle timer interrupt */
3233     dc->base.is_jmp = DISAS_EXIT;
3234 }
3235 
3236 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3237 
3238 static void do_wrstick(DisasContext *dc, TCGv src)
3239 {
3240 #ifdef TARGET_SPARC64
3241     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3242 
3243     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3244     translator_io_start(&dc->base);
3245     gen_helper_tick_set_count(r_tickptr, src);
3246     /* End TB to handle timer interrupt */
3247     dc->base.is_jmp = DISAS_EXIT;
3248 #else
3249     qemu_build_not_reached();
3250 #endif
3251 }
3252 
3253 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3254 
3255 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3256 {
3257     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3258 
3259     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3260     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3261     translator_io_start(&dc->base);
3262     gen_helper_tick_set_limit(r_tickptr, src);
3263     /* End TB to handle timer interrupt */
3264     dc->base.is_jmp = DISAS_EXIT;
3265 }
3266 
3267 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3268 
3269 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3270 {
3271     finishing_insn(dc);
3272     save_state(dc);
3273     gen_helper_power_down(tcg_env);
3274 }
3275 
3276 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3277 
3278 static void do_wrpsr(DisasContext *dc, TCGv src)
3279 {
3280     gen_helper_wrpsr(tcg_env, src);
3281     dc->base.is_jmp = DISAS_EXIT;
3282 }
3283 
3284 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3285 
3286 static void do_wrwim(DisasContext *dc, TCGv src)
3287 {
3288     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3289     TCGv tmp = tcg_temp_new();
3290 
3291     tcg_gen_andi_tl(tmp, src, mask);
3292     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3293 }
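
/*
 * E.g. with dc->def->nwindows == 8 the mask is 0xff, so only bits for
 * implemented windows can be set in WIM.
 */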
3294 
3295 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3296 
3297 static void do_wrtpc(DisasContext *dc, TCGv src)
3298 {
3299 #ifdef TARGET_SPARC64
3300     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3301 
3302     gen_load_trap_state_at_tl(r_tsptr);
3303     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3304 #else
3305     qemu_build_not_reached();
3306 #endif
3307 }
3308 
3309 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3310 
3311 static void do_wrtnpc(DisasContext *dc, TCGv src)
3312 {
3313 #ifdef TARGET_SPARC64
3314     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3315 
3316     gen_load_trap_state_at_tl(r_tsptr);
3317     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3318 #else
3319     qemu_build_not_reached();
3320 #endif
3321 }
3322 
3323 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3324 
3325 static void do_wrtstate(DisasContext *dc, TCGv src)
3326 {
3327 #ifdef TARGET_SPARC64
3328     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3329 
3330     gen_load_trap_state_at_tl(r_tsptr);
3331     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3332 #else
3333     qemu_build_not_reached();
3334 #endif
3335 }
3336 
3337 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3338 
3339 static void do_wrtt(DisasContext *dc, TCGv src)
3340 {
3341 #ifdef TARGET_SPARC64
3342     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3343 
3344     gen_load_trap_state_at_tl(r_tsptr);
3345     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3346 #else
3347     qemu_build_not_reached();
3348 #endif
3349 }
3350 
3351 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3352 
3353 static void do_wrtick(DisasContext *dc, TCGv src)
3354 {
3355     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3356 
3357     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3358     translator_io_start(&dc->base);
3359     gen_helper_tick_set_count(r_tickptr, src);
3360     /* End TB to handle timer interrupt */
3361     dc->base.is_jmp = DISAS_EXIT;
3362 }
3363 
3364 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3365 
3366 static void do_wrtba(DisasContext *dc, TCGv src)
3367 {
3368     tcg_gen_mov_tl(cpu_tbr, src);
3369 }
3370 
3371 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3372 
3373 static void do_wrpstate(DisasContext *dc, TCGv src)
3374 {
3375     save_state(dc);
3376     if (translator_io_start(&dc->base)) {
3377         dc->base.is_jmp = DISAS_EXIT;
3378     }
3379     gen_helper_wrpstate(tcg_env, src);
3380     dc->npc = DYNAMIC_PC;
3381 }
3382 
3383 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3384 
3385 static void do_wrtl(DisasContext *dc, TCGv src)
3386 {
3387     save_state(dc);
3388     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3389     dc->npc = DYNAMIC_PC;
3390 }
3391 
3392 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3393 
3394 static void do_wrpil(DisasContext *dc, TCGv src)
3395 {
3396     if (translator_io_start(&dc->base)) {
3397         dc->base.is_jmp = DISAS_EXIT;
3398     }
3399     gen_helper_wrpil(tcg_env, src);
3400 }
3401 
3402 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3403 
3404 static void do_wrcwp(DisasContext *dc, TCGv src)
3405 {
3406     gen_helper_wrcwp(tcg_env, src);
3407 }
3408 
3409 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3410 
3411 static void do_wrcansave(DisasContext *dc, TCGv src)
3412 {
3413     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3414 }
3415 
3416 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3417 
3418 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3419 {
3420     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3421 }
3422 
3423 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3424 
3425 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3426 {
3427     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3428 }
3429 
3430 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3431 
3432 static void do_wrotherwin(DisasContext *dc, TCGv src)
3433 {
3434     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3435 }
3436 
3437 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3438 
3439 static void do_wrwstate(DisasContext *dc, TCGv src)
3440 {
3441     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3442 }
3443 
3444 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3445 
3446 static void do_wrgl(DisasContext *dc, TCGv src)
3447 {
3448     gen_helper_wrgl(tcg_env, src);
3449 }
3450 
3451 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3452 
3453 /* UA2005 strand status */
3454 static void do_wrssr(DisasContext *dc, TCGv src)
3455 {
3456     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3457 }
3458 
3459 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3460 
3461 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3462 
3463 static void do_wrhpstate(DisasContext *dc, TCGv src)
3464 {
3465     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3466     dc->base.is_jmp = DISAS_EXIT;
3467 }
3468 
3469 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3470 
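/*
 * HTSTATE is indexed by the current trap level.  Form the address
 * &env->htstate[TL & MAXTL_MASK] by hand: scale the masked TL by the
 * 8-byte entry size before adding it to the env pointer.
 */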
3471 static void do_wrhtstate(DisasContext *dc, TCGv src)
3472 {
3473     TCGv_i32 tl = tcg_temp_new_i32();
3474     TCGv_ptr tp = tcg_temp_new_ptr();
3475 
3476     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3477     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3478     tcg_gen_shli_i32(tl, tl, 3);
3479     tcg_gen_ext_i32_ptr(tp, tl);
3480     tcg_gen_add_ptr(tp, tp, tcg_env);
3481 
3482     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3483 }
3484 
3485 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3486 
3487 static void do_wrhintp(DisasContext *dc, TCGv src)
3488 {
3489     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3490 }
3491 
3492 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3493 
3494 static void do_wrhtba(DisasContext *dc, TCGv src)
3495 {
3496     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3497 }
3498 
3499 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3500 
3501 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3502 {
3503     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3504 
3505     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3506     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3507     translator_io_start(&dc->base);
3508     gen_helper_tick_set_limit(r_tickptr, src);
3509     /* End TB to handle timer interrupt */
3510     dc->base.is_jmp = DISAS_EXIT;
3511 }
3512 
3513 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3514       do_wrhstick_cmpr)
3515 
3516 static bool do_saved_restored(DisasContext *dc, bool saved)
3517 {
3518     if (!supervisor(dc)) {
3519         return raise_priv(dc);
3520     }
3521     if (saved) {
3522         gen_helper_saved(tcg_env);
3523     } else {
3524         gen_helper_restored(tcg_env);
3525     }
3526     return advance_pc(dc);
3527 }
3528 
3529 TRANS(SAVED, 64, do_saved_restored, true)
3530 TRANS(RESTORED, 64, do_saved_restored, false)
3531 
3532 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3533 {
3534     return advance_pc(dc);
3535 }
3536 
3537 /*
3538  * TODO: Need a feature bit for sparcv8.
3539  * In the meantime, treat all 32-bit cpus like sparcv7.
3540  */
3541 TRANS(NOP_v7, 32, trans_NOP, a)
3542 TRANS(NOP_v9, 64, trans_NOP, a)
3543 
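/*
 * Common worker for the reg/reg and reg/imm arithmetic forms.
 * For flag-setting logical ops the result is computed directly into
 * cpu_cc_N: N and Z can both be derived from the result itself,
 * while C and V are simply cleared.
 */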
3544 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3545                          void (*func)(TCGv, TCGv, TCGv),
3546                          void (*funci)(TCGv, TCGv, target_long),
3547                          bool logic_cc)
3548 {
3549     TCGv dst, src1;
3550 
3551     /* For simplicity, we under-decoded the rs2 form. */
3552     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3553         return false;
3554     }
3555 
3556     if (logic_cc) {
3557         dst = cpu_cc_N;
3558     } else {
3559         dst = gen_dest_gpr(dc, a->rd);
3560     }
3561     src1 = gen_load_gpr(dc, a->rs1);
3562 
3563     if (a->imm || a->rs2_or_imm == 0) {
3564         if (funci) {
3565             funci(dst, src1, a->rs2_or_imm);
3566         } else {
3567             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3568         }
3569     } else {
3570         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3571     }
3572 
3573     if (logic_cc) {
3574         if (TARGET_LONG_BITS == 64) {
3575             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3576             tcg_gen_movi_tl(cpu_icc_C, 0);
3577         }
3578         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3579         tcg_gen_movi_tl(cpu_cc_C, 0);
3580         tcg_gen_movi_tl(cpu_cc_V, 0);
3581     }
3582 
3583     gen_store_gpr(dc, a->rd, dst);
3584     return advance_pc(dc);
3585 }
3586 
3587 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3588                      void (*func)(TCGv, TCGv, TCGv),
3589                      void (*funci)(TCGv, TCGv, target_long),
3590                      void (*func_cc)(TCGv, TCGv, TCGv))
3591 {
3592     if (a->cc) {
3593         return do_arith_int(dc, a, func_cc, NULL, false);
3594     }
3595     return do_arith_int(dc, a, func, funci, false);
3596 }
3597 
3598 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3599                      void (*func)(TCGv, TCGv, TCGv),
3600                      void (*funci)(TCGv, TCGv, target_long))
3601 {
3602     return do_arith_int(dc, a, func, funci, a->cc);
3603 }
3604 
3605 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3606 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3607 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3608 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3609 
3610 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3611 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3612 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3613 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3614 
3615 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3616 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3617 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3618 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3619 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3620 
3621 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3622 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3623 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3624 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3625 
3626 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3627 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3628 
3629 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3630 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3631 
3632 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3633 {
3634     /* OR with %g0 is the canonical alias for MOV. */
3635     if (!a->cc && a->rs1 == 0) {
3636         if (a->imm || a->rs2_or_imm == 0) {
3637             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3638         } else if (a->rs2_or_imm & ~0x1f) {
3639             /* For simplicity, we under-decoded the rs2 form. */
3640             return false;
3641         } else {
3642             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3643         }
3644         return advance_pc(dc);
3645     }
3646     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3647 }
3648 
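/*
 * UDIV divides the 64-bit quantity (Y:rs1) by a 32-bit divisor.
 * A quotient that does not fit in 32 bits saturates to UINT32_MAX,
 * matching the architected overflow behaviour.  For the register
 * form, division by zero cannot be detected at translation time,
 * so it is checked at runtime via a delayed exception.
 */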
3649 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3650 {
3651     TCGv_i64 t1, t2;
3652     TCGv dst;
3653 
3654     if (!avail_DIV(dc)) {
3655         return false;
3656     }
3657     /* For simplicity, we under-decoded the rs2 form. */
3658     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3659         return false;
3660     }
3661 
3662     if (unlikely(a->rs2_or_imm == 0)) {
3663         gen_exception(dc, TT_DIV_ZERO);
3664         return true;
3665     }
3666 
3667     if (a->imm) {
3668         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3669     } else {
3670         TCGLabel *lab;
3671         TCGv_i32 n2;
3672 
3673         finishing_insn(dc);
3674         flush_cond(dc);
3675 
3676         n2 = tcg_temp_new_i32();
3677         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3678 
3679         lab = delay_exception(dc, TT_DIV_ZERO);
3680         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3681 
3682         t2 = tcg_temp_new_i64();
3683 #ifdef TARGET_SPARC64
3684         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3685 #else
3686         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3687 #endif
3688     }
3689 
3690     t1 = tcg_temp_new_i64();
3691     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3692 
3693     tcg_gen_divu_i64(t1, t1, t2);
3694     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3695 
3696     dst = gen_dest_gpr(dc, a->rd);
3697     tcg_gen_trunc_i64_tl(dst, t1);
3698     gen_store_gpr(dc, a->rd, dst);
3699     return advance_pc(dc);
3700 }
3701 
3702 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3703 {
3704     TCGv dst, src1, src2;
3705 
3706     if (!avail_64(dc)) {
3707         return false;
3708     }
3709     /* For simplicity, we under-decoded the rs2 form. */
3710     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3711         return false;
3712     }
3713 
3714     if (unlikely(a->rs2_or_imm == 0)) {
3715         gen_exception(dc, TT_DIV_ZERO);
3716         return true;
3717     }
3718 
3719     if (a->imm) {
3720         src2 = tcg_constant_tl(a->rs2_or_imm);
3721     } else {
3722         TCGLabel *lab;
3723 
3724         finishing_insn(dc);
3725         flush_cond(dc);
3726 
3727         lab = delay_exception(dc, TT_DIV_ZERO);
3728         src2 = cpu_regs[a->rs2_or_imm];
3729         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3730     }
3731 
3732     dst = gen_dest_gpr(dc, a->rd);
3733     src1 = gen_load_gpr(dc, a->rs1);
3734 
3735     tcg_gen_divu_tl(dst, src1, src2);
3736     gen_store_gpr(dc, a->rd, dst);
3737     return advance_pc(dc);
3738 }
3739 
3740 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3741 {
3742     TCGv dst, src1, src2;
3743 
3744     if (!avail_64(dc)) {
3745         return false;
3746     }
3747     /* For simplicity, we under-decoded the rs2 form. */
3748     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3749         return false;
3750     }
3751 
3752     if (unlikely(a->rs2_or_imm == 0)) {
3753         gen_exception(dc, TT_DIV_ZERO);
3754         return true;
3755     }
3756 
3757     dst = gen_dest_gpr(dc, a->rd);
3758     src1 = gen_load_gpr(dc, a->rs1);
3759 
3760     if (a->imm) {
3761         if (unlikely(a->rs2_or_imm == -1)) {
3762             tcg_gen_neg_tl(dst, src1);
3763             gen_store_gpr(dc, a->rd, dst);
3764             return advance_pc(dc);
3765         }
3766         src2 = tcg_constant_tl(a->rs2_or_imm);
3767     } else {
3768         TCGLabel *lab;
3769         TCGv t1, t2;
3770 
3771         finishing_insn(dc);
3772         flush_cond(dc);
3773 
3774         lab = delay_exception(dc, TT_DIV_ZERO);
3775         src2 = cpu_regs[a->rs2_or_imm];
3776         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3777 
3778         /*
3779          * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3780          * Set SRC2 to 1 as a new divisor: INT64_MIN / 1 == INT64_MIN, the expected result.
3781          */
3782         t1 = tcg_temp_new();
3783         t2 = tcg_temp_new();
3784         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3785         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3786         tcg_gen_and_tl(t1, t1, t2);
3787         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3788                            tcg_constant_tl(1), src2);
3789         src2 = t1;
3790     }
3791 
3792     tcg_gen_div_tl(dst, src1, src2);
3793     gen_store_gpr(dc, a->rd, dst);
3794     return advance_pc(dc);
3795 }
3796 
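/*
 * Common translation for the VIS EDGE instructions.  Build a mask of
 * the bytes (or halfwords/words) spanned from address s1 to address s2
 * within one 8-byte line: the left-edge mask excludes elements before
 * s1, the right-edge mask excludes elements after s2, and the two are
 * intersected only when both addresses fall within the same line.
 * E.g. big-endian EDGE8 with (s1 & 7) == 3 and (s2 & 7) == 5 in the
 * same line yields 0x1f & 0xfc == 0x1c.  The cc variants additionally
 * set the integer condition codes from s1 - s2.
 */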
3797 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3798                      int width, bool cc, bool little_endian)
3799 {
3800     TCGv dst, s1, s2, l, r, t, m;
3801     uint64_t amask = address_mask_i(dc, -8);
3802 
3803     dst = gen_dest_gpr(dc, a->rd);
3804     s1 = gen_load_gpr(dc, a->rs1);
3805     s2 = gen_load_gpr(dc, a->rs2);
3806 
3807     if (cc) {
3808         gen_op_subcc(cpu_cc_N, s1, s2);
3809     }
3810 
3811     l = tcg_temp_new();
3812     r = tcg_temp_new();
3813     t = tcg_temp_new();
3814 
3815     switch (width) {
3816     case 8:
3817         tcg_gen_andi_tl(l, s1, 7);
3818         tcg_gen_andi_tl(r, s2, 7);
3819         tcg_gen_xori_tl(r, r, 7);
3820         m = tcg_constant_tl(0xff);
3821         break;
3822     case 16:
3823         tcg_gen_extract_tl(l, s1, 1, 2);
3824         tcg_gen_extract_tl(r, s2, 1, 2);
3825         tcg_gen_xori_tl(r, r, 3);
3826         m = tcg_constant_tl(0xf);
3827         break;
3828     case 32:
3829         tcg_gen_extract_tl(l, s1, 2, 1);
3830         tcg_gen_extract_tl(r, s2, 2, 1);
3831         tcg_gen_xori_tl(r, r, 1);
3832         m = tcg_constant_tl(0x3);
3833         break;
3834     default:
3835         g_assert_not_reached();
3836     }
3837 
3838     /* Compute Left Edge */
3839     if (little_endian) {
3840         tcg_gen_shl_tl(l, m, l);
3841         tcg_gen_and_tl(l, l, m);
3842     } else {
3843         tcg_gen_shr_tl(l, m, l);
3844     }
3845     /* Compute Right Edge */
3846     if (little_endian) {
3847         tcg_gen_shr_tl(r, m, r);
3848     } else {
3849         tcg_gen_shl_tl(r, m, r);
3850         tcg_gen_and_tl(r, r, m);
3851     }
3852 
3853     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3854     tcg_gen_xor_tl(t, s1, s2);
3855     tcg_gen_and_tl(r, r, l);
3856     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3857 
3858     gen_store_gpr(dc, a->rd, dst);
3859     return advance_pc(dc);
3860 }
3861 
3862 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3863 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3864 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3865 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3866 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3867 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3868 
3869 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3870 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3871 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3872 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3873 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3874 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3875 
3876 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3877                    void (*func)(TCGv, TCGv, TCGv))
3878 {
3879     TCGv dst = gen_dest_gpr(dc, a->rd);
3880     TCGv src1 = gen_load_gpr(dc, a->rs1);
3881     TCGv src2 = gen_load_gpr(dc, a->rs2);
3882 
3883     func(dst, src1, src2);
3884     gen_store_gpr(dc, a->rd, dst);
3885     return advance_pc(dc);
3886 }
3887 
3888 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3889 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3890 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3891 
3892 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3893 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3894 
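/*
 * ALIGNADDRESS returns s1 + s2 rounded down to a multiple of 8 and
 * latches the discarded low three bits in GSR.align for use by a
 * later FALIGNDATA.  The "little" variant latches the negated offset.
 */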
3895 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3896 {
3897 #ifdef TARGET_SPARC64
3898     TCGv tmp = tcg_temp_new();
3899 
3900     tcg_gen_add_tl(tmp, s1, s2);
3901     tcg_gen_andi_tl(dst, tmp, -8);
3902     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3903 #else
3904     g_assert_not_reached();
3905 #endif
3906 }
3907 
3908 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3909 {
3910 #ifdef TARGET_SPARC64
3911     TCGv tmp = tcg_temp_new();
3912 
3913     tcg_gen_add_tl(tmp, s1, s2);
3914     tcg_gen_andi_tl(dst, tmp, -8);
3915     tcg_gen_neg_tl(tmp, tmp);
3916     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3917 #else
3918     g_assert_not_reached();
3919 #endif
3920 }
3921 
3922 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3923 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3924 
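/* BMASK latches s1 + s2 in GSR.mask (bits 63:32) for use by BSHUFFLE. */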
3925 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3926 {
3927 #ifdef TARGET_SPARC64
3928     tcg_gen_add_tl(dst, s1, s2);
3929     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3930 #else
3931     g_assert_not_reached();
3932 #endif
3933 }
3934 
3935 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3936 
3937 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
3938 {
3939     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
3940     return advance_pc(dc);
3941 }
3942 
3943 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
3944 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
3945 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
3946 
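/*
 * Register-counted shifts.  The count is masked to 5 or 6 bits
 * depending on the X field; for the 32-bit forms, the source (for
 * right shifts) or the result (for left shifts) is extended from
 * 32 bits.
 */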
3947 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3948 {
3949     TCGv dst, src1, src2;
3950 
3951     /* Reject 64-bit shifts for sparc32. */
3952     if (avail_32(dc) && a->x) {
3953         return false;
3954     }
3955 
3956     src2 = tcg_temp_new();
3957     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3958     src1 = gen_load_gpr(dc, a->rs1);
3959     dst = gen_dest_gpr(dc, a->rd);
3960 
3961     if (l) {
3962         tcg_gen_shl_tl(dst, src1, src2);
3963         if (!a->x) {
3964             tcg_gen_ext32u_tl(dst, dst);
3965         }
3966     } else if (u) {
3967         if (!a->x) {
3968             tcg_gen_ext32u_tl(dst, src1);
3969             src1 = dst;
3970         }
3971         tcg_gen_shr_tl(dst, src1, src2);
3972     } else {
3973         if (!a->x) {
3974             tcg_gen_ext32s_tl(dst, src1);
3975             src1 = dst;
3976         }
3977         tcg_gen_sar_tl(dst, src1, src2);
3978     }
3979     gen_store_gpr(dc, a->rd, dst);
3980     return advance_pc(dc);
3981 }
3982 
3983 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3984 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3985 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3986 
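/*
 * Immediate-counted shifts.  On a 64-bit cpu the 32-bit forms fold
 * the shift and the 32-bit extension into a single deposit, extract
 * or sextract: e.g. SRL by I becomes a zero-extending read of bits
 * [I, 32) of the source.
 */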
3987 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3988 {
3989     TCGv dst, src1;
3990 
3991     /* Reject 64-bit shifts for sparc32. */
3992     if (avail_32(dc) && (a->x || a->i >= 32)) {
3993         return false;
3994     }
3995 
3996     src1 = gen_load_gpr(dc, a->rs1);
3997     dst = gen_dest_gpr(dc, a->rd);
3998 
3999     if (avail_32(dc) || a->x) {
4000         if (l) {
4001             tcg_gen_shli_tl(dst, src1, a->i);
4002         } else if (u) {
4003             tcg_gen_shri_tl(dst, src1, a->i);
4004         } else {
4005             tcg_gen_sari_tl(dst, src1, a->i);
4006         }
4007     } else {
4008         if (l) {
4009             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4010         } else if (u) {
4011             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4012         } else {
4013             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4014         }
4015     }
4016     gen_store_gpr(dc, a->rd, dst);
4017     return advance_pc(dc);
4018 }
4019 
4020 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4021 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4022 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4023 
4024 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4025 {
4026     /* For simplicity, we under-decoded the rs2 form. */
4027     if (!imm && rs2_or_imm & ~0x1f) {
4028         return NULL;
4029     }
4030     if (imm || rs2_or_imm == 0) {
4031         return tcg_constant_tl(rs2_or_imm);
4032     } else {
4033         return cpu_regs[rs2_or_imm];
4034     }
4035 }
4036 
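/*
 * Conditional moves write rd only when the condition holds, so the
 * old value is preloaded and movcond selects between it and src2.
 */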
4037 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4038 {
4039     TCGv dst = gen_load_gpr(dc, rd);
4040     TCGv c2 = tcg_constant_tl(cmp->c2);
4041 
4042     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4043     gen_store_gpr(dc, rd, dst);
4044     return advance_pc(dc);
4045 }
4046 
4047 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4048 {
4049     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4050     DisasCompare cmp;
4051 
4052     if (src2 == NULL) {
4053         return false;
4054     }
4055     gen_compare(&cmp, a->cc, a->cond, dc);
4056     return do_mov_cond(dc, &cmp, a->rd, src2);
4057 }
4058 
4059 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4060 {
4061     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4062     DisasCompare cmp;
4063 
4064     if (src2 == NULL) {
4065         return false;
4066     }
4067     gen_fcompare(&cmp, a->cc, a->cond);
4068     return do_mov_cond(dc, &cmp, a->rd, src2);
4069 }
4070 
4071 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4072 {
4073     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4074     DisasCompare cmp;
4075 
4076     if (src2 == NULL) {
4077         return false;
4078     }
4079     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4080         return false;
4081     }
4082     return do_mov_cond(dc, &cmp, a->rd, src2);
4083 }
4084 
4085 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4086                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4087 {
4088     TCGv src1, sum;
4089 
4090     /* For simplicity, we under-decoded the rs2 form. */
4091     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4092         return false;
4093     }
4094 
4095     /*
4096      * Always load the sum into a new temporary.
4097      * This is required to capture the value across a window change,
4098      * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4099      */
4100     sum = tcg_temp_new();
4101     src1 = gen_load_gpr(dc, a->rs1);
4102     if (a->imm || a->rs2_or_imm == 0) {
4103         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4104     } else {
4105         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4106     }
4107     return func(dc, a->rd, sum);
4108 }
4109 
4110 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4111 {
4112     /*
4113      * Preserve pc across advance, so that we can delay
4114      * the writeback to rd until after src is consumed.
4115      */
4116     target_ulong cur_pc = dc->pc;
4117 
4118     gen_check_align(dc, src, 3);
4119 
4120     gen_mov_pc_npc(dc);
4121     tcg_gen_mov_tl(cpu_npc, src);
4122     gen_address_mask(dc, cpu_npc);
4123     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4124 
4125     dc->npc = DYNAMIC_PC_LOOKUP;
4126     return true;
4127 }
4128 
4129 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4130 
4131 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4132 {
4133     if (!supervisor(dc)) {
4134         return raise_priv(dc);
4135     }
4136 
4137     gen_check_align(dc, src, 3);
4138 
4139     gen_mov_pc_npc(dc);
4140     tcg_gen_mov_tl(cpu_npc, src);
4141     gen_helper_rett(tcg_env);
4142 
4143     dc->npc = DYNAMIC_PC;
4144     return true;
4145 }
4146 
4147 TRANS(RETT, 32, do_add_special, a, do_rett)
4148 
4149 static bool do_return(DisasContext *dc, int rd, TCGv src)
4150 {
4151     gen_check_align(dc, src, 3);
4152     gen_helper_restore(tcg_env);
4153 
4154     gen_mov_pc_npc(dc);
4155     tcg_gen_mov_tl(cpu_npc, src);
4156     gen_address_mask(dc, cpu_npc);
4157 
4158     dc->npc = DYNAMIC_PC_LOOKUP;
4159     return true;
4160 }
4161 
4162 TRANS(RETURN, 64, do_add_special, a, do_return)
4163 
4164 static bool do_save(DisasContext *dc, int rd, TCGv src)
4165 {
4166     gen_helper_save(tcg_env);
4167     gen_store_gpr(dc, rd, src);
4168     return advance_pc(dc);
4169 }
4170 
4171 TRANS(SAVE, ALL, do_add_special, a, do_save)
4172 
4173 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4174 {
4175     gen_helper_restore(tcg_env);
4176     gen_store_gpr(dc, rd, src);
4177     return advance_pc(dc);
4178 }
4179 
4180 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4181 
4182 static bool do_done_retry(DisasContext *dc, bool done)
4183 {
4184     if (!supervisor(dc)) {
4185         return raise_priv(dc);
4186     }
4187     dc->npc = DYNAMIC_PC;
4188     dc->pc = DYNAMIC_PC;
4189     translator_io_start(&dc->base);
4190     if (done) {
4191         gen_helper_done(tcg_env);
4192     } else {
4193         gen_helper_retry(tcg_env);
4194     }
4195     return true;
4196 }
4197 
4198 TRANS(DONE, 64, do_done_retry, true)
4199 TRANS(RETRY, 64, do_done_retry, false)
4200 
4201 /*
4202  * Major opcode 11 -- load and store instructions
4203  */
4204 
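/*
 * Compute the effective address rs1 + (simm13 or rs2).  Returns NULL
 * if the under-decoded rs2 field is out of range.  When address
 * masking is in effect (AM_CHECK), the result is zero-extended to
 * 32 bits.
 */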
4205 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4206 {
4207     TCGv addr, tmp = NULL;
4208 
4209     /* For simplicity, we under-decoded the rs2 form. */
4210     if (!imm && rs2_or_imm & ~0x1f) {
4211         return NULL;
4212     }
4213 
4214     addr = gen_load_gpr(dc, rs1);
4215     if (rs2_or_imm) {
4216         tmp = tcg_temp_new();
4217         if (imm) {
4218             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4219         } else {
4220             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4221         }
4222         addr = tmp;
4223     }
4224     if (AM_CHECK(dc)) {
4225         if (!tmp) {
4226             tmp = tcg_temp_new();
4227         }
4228         tcg_gen_ext32u_tl(tmp, addr);
4229         addr = tmp;
4230     }
4231     return addr;
4232 }
4233 
4234 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4235 {
4236     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4237     DisasASI da;
4238 
4239     if (addr == NULL) {
4240         return false;
4241     }
4242     da = resolve_asi(dc, a->asi, mop);
4243 
4244     reg = gen_dest_gpr(dc, a->rd);
4245     gen_ld_asi(dc, &da, reg, addr);
4246     gen_store_gpr(dc, a->rd, reg);
4247     return advance_pc(dc);
4248 }
4249 
4250 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4251 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4252 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4253 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4254 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4255 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4256 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4257 
4258 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4259 {
4260     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4261     DisasASI da;
4262 
4263     if (addr == NULL) {
4264         return false;
4265     }
4266     da = resolve_asi(dc, a->asi, mop);
4267 
4268     reg = gen_load_gpr(dc, a->rd);
4269     gen_st_asi(dc, &da, reg, addr);
4270     return advance_pc(dc);
4271 }
4272 
4273 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4274 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4275 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4276 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4277 
4278 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4279 {
4280     TCGv addr;
4281     DisasASI da;
4282 
4283     if (a->rd & 1) {
4284         return false;
4285     }
4286     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4287     if (addr == NULL) {
4288         return false;
4289     }
4290     da = resolve_asi(dc, a->asi, MO_TEUQ);
4291     gen_ldda_asi(dc, &da, addr, a->rd);
4292     return advance_pc(dc);
4293 }
4294 
4295 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4296 {
4297     TCGv addr;
4298     DisasASI da;
4299 
4300     if (a->rd & 1) {
4301         return false;
4302     }
4303     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4304     if (addr == NULL) {
4305         return false;
4306     }
4307     da = resolve_asi(dc, a->asi, MO_TEUQ);
4308     gen_stda_asi(dc, &da, addr, a->rd);
4309     return advance_pc(dc);
4310 }
4311 
4312 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4313 {
4314     TCGv addr, reg;
4315     DisasASI da;
4316 
4317     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4318     if (addr == NULL) {
4319         return false;
4320     }
4321     da = resolve_asi(dc, a->asi, MO_UB);
4322 
4323     reg = gen_dest_gpr(dc, a->rd);
4324     gen_ldstub_asi(dc, &da, reg, addr);
4325     gen_store_gpr(dc, a->rd, reg);
4326     return advance_pc(dc);
4327 }
4328 
4329 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4330 {
4331     TCGv addr, dst, src;
4332     DisasASI da;
4333 
4334     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4335     if (addr == NULL) {
4336         return false;
4337     }
4338     da = resolve_asi(dc, a->asi, MO_TEUL);
4339 
4340     dst = gen_dest_gpr(dc, a->rd);
4341     src = gen_load_gpr(dc, a->rd);
4342     gen_swap_asi(dc, &da, dst, src, addr);
4343     gen_store_gpr(dc, a->rd, dst);
4344     return advance_pc(dc);
4345 }
4346 
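/*
 * Compare-and-swap: the effective address comes from rs1 alone,
 * memory is compared against r[rs2] and conditionally exchanged with
 * r[rd]; the old memory value always ends up in rd.
 */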
4347 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4348 {
4349     TCGv addr, o, n, c;
4350     DisasASI da;
4351 
4352     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4353     if (addr == NULL) {
4354         return false;
4355     }
4356     da = resolve_asi(dc, a->asi, mop);
4357 
4358     o = gen_dest_gpr(dc, a->rd);
4359     n = gen_load_gpr(dc, a->rd);
4360     c = gen_load_gpr(dc, a->rs2_or_imm);
4361     gen_cas_asi(dc, &da, o, n, c, addr);
4362     gen_store_gpr(dc, a->rd, o);
4363     return advance_pc(dc);
4364 }
4365 
4366 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4367 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4368 
4369 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4370 {
4371     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4372     DisasASI da;
4373 
4374     if (addr == NULL) {
4375         return false;
4376     }
4377     if (gen_trap_ifnofpu(dc)) {
4378         return true;
4379     }
4380     if (sz == MO_128 && gen_trap_float128(dc)) {
4381         return true;
4382     }
4383     da = resolve_asi(dc, a->asi, MO_TE | sz);
4384     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4385     gen_update_fprs_dirty(dc, a->rd);
4386     return advance_pc(dc);
4387 }
4388 
4389 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4390 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4391 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4392 
4393 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4394 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4395 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4396 
4397 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4398 {
4399     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4400     DisasASI da;
4401 
4402     if (addr == NULL) {
4403         return false;
4404     }
4405     if (gen_trap_ifnofpu(dc)) {
4406         return true;
4407     }
4408     if (sz == MO_128 && gen_trap_float128(dc)) {
4409         return true;
4410     }
4411     da = resolve_asi(dc, a->asi, MO_TE | sz);
4412     gen_stf_asi(dc, &da, sz, addr, a->rd);
4413     return advance_pc(dc);
4414 }
4415 
4416 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4417 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4418 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4419 
4420 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4421 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4422 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4423 
4424 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4425 {
4426     if (!avail_32(dc)) {
4427         return false;
4428     }
4429     if (!supervisor(dc)) {
4430         return raise_priv(dc);
4431     }
4432     if (gen_trap_ifnofpu(dc)) {
4433         return true;
4434     }
4435     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4436     return true;
4437 }
4438 
4439 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4440 {
4441     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4442     TCGv_i32 tmp;
4443 
4444     if (addr == NULL) {
4445         return false;
4446     }
4447     if (gen_trap_ifnofpu(dc)) {
4448         return true;
4449     }
4450 
4451     tmp = tcg_temp_new_i32();
4452     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4453 
4454     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4455     /* LDFSR does not change FCC[1-3]. */
4456 
4457     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4458     return advance_pc(dc);
4459 }
4460 
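/*
 * The 64-bit FSR keeps fcc1..fcc3 in the high word and fcc0, along
 * with everything else, in the low word.  Split the loaded value,
 * extract all four fcc fields, and hand the low word to the helper,
 * which ignores the fcc and ftt bits.
 */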
4461 static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
4462 {
4463 #ifdef TARGET_SPARC64
4464     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4465     TCGv_i64 t64;
4466     TCGv_i32 lo, hi;
4467 
4468     if (addr == NULL) {
4469         return false;
4470     }
4471     if (gen_trap_ifnofpu(dc)) {
4472         return true;
4473     }
4474 
4475     t64 = tcg_temp_new_i64();
4476     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4477 
4478     lo = tcg_temp_new_i32();
4479     hi = cpu_fcc[3];
4480     tcg_gen_extr_i64_i32(lo, hi, t64);
4481     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4482     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4483     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4484     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4485 
4486     gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4487     return advance_pc(dc);
4488 #else
4489     return false;
4490 #endif
4491 }
4492 
4493 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4494 {
4495     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4496     TCGv fsr;
4497 
4498     if (addr == NULL) {
4499         return false;
4500     }
4501     if (gen_trap_ifnofpu(dc)) {
4502         return true;
4503     }
4504 
4505     fsr = tcg_temp_new();
4506     gen_helper_get_fsr(fsr, tcg_env);
4507     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4508     return advance_pc(dc);
4509 }
4510 
4511 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4512 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4513 
4514 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4515 {
4516     if (gen_trap_ifnofpu(dc)) {
4517         return true;
4518     }
4519     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4520     return advance_pc(dc);
4521 }
4522 
4523 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4524 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4525 
4526 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4527 {
4528     if (gen_trap_ifnofpu(dc)) {
4529         return true;
4530     }
4531     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4532     return advance_pc(dc);
4533 }
4534 
4535 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4536 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4537 
4538 static bool do_ff(DisasContext *dc, arg_r_r *a,
4539                   void (*func)(TCGv_i32, TCGv_i32))
4540 {
4541     TCGv_i32 tmp;
4542 
4543     if (gen_trap_ifnofpu(dc)) {
4544         return true;
4545     }
4546 
4547     tmp = gen_load_fpr_F(dc, a->rs);
4548     func(tmp, tmp);
4549     gen_store_fpr_F(dc, a->rd, tmp);
4550     return advance_pc(dc);
4551 }
4552 
4553 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4554 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4555 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4556 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4557 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4558 
4559 static bool do_fd(DisasContext *dc, arg_r_r *a,
4560                   void (*func)(TCGv_i32, TCGv_i64))
4561 {
4562     TCGv_i32 dst;
4563     TCGv_i64 src;
4564 
4565     if (gen_trap_ifnofpu(dc)) {
4566         return true;
4567     }
4568 
4569     dst = tcg_temp_new_i32();
4570     src = gen_load_fpr_D(dc, a->rs);
4571     func(dst, src);
4572     gen_store_fpr_F(dc, a->rd, dst);
4573     return advance_pc(dc);
4574 }
4575 
4576 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4577 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4578 
4579 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4580                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4581 {
4582     TCGv_i32 tmp;
4583 
4584     if (gen_trap_ifnofpu(dc)) {
4585         return true;
4586     }
4587 
4588     tmp = gen_load_fpr_F(dc, a->rs);
4589     func(tmp, tcg_env, tmp);
4590     gen_store_fpr_F(dc, a->rd, tmp);
4591     return advance_pc(dc);
4592 }
4593 
4594 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4595 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4596 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4597 
4598 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4599                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4600 {
4601     TCGv_i32 dst;
4602     TCGv_i64 src;
4603 
4604     if (gen_trap_ifnofpu(dc)) {
4605         return true;
4606     }
4607 
4608     dst = tcg_temp_new_i32();
4609     src = gen_load_fpr_D(dc, a->rs);
4610     func(dst, tcg_env, src);
4611     gen_store_fpr_F(dc, a->rd, dst);
4612     return advance_pc(dc);
4613 }
4614 
4615 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4616 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4617 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4618 
4619 static bool do_dd(DisasContext *dc, arg_r_r *a,
4620                   void (*func)(TCGv_i64, TCGv_i64))
4621 {
4622     TCGv_i64 dst, src;
4623 
4624     if (gen_trap_ifnofpu(dc)) {
4625         return true;
4626     }
4627 
4628     dst = tcg_temp_new_i64();
4629     src = gen_load_fpr_D(dc, a->rs);
4630     func(dst, src);
4631     gen_store_fpr_D(dc, a->rd, dst);
4632     return advance_pc(dc);
4633 }
4634 
4635 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4636 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4637 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4638 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4639 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4640 
4641 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4642                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4643 {
4644     TCGv_i64 dst, src;
4645 
4646     if (gen_trap_ifnofpu(dc)) {
4647         return true;
4648     }
4649 
4650     dst = tcg_temp_new_i64();
4651     src = gen_load_fpr_D(dc, a->rs);
4652     func(dst, tcg_env, src);
4653     gen_store_fpr_D(dc, a->rd, dst);
4654     return advance_pc(dc);
4655 }
4656 
4657 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4658 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4659 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4660 
4661 static bool do_df(DisasContext *dc, arg_r_r *a,
4662                   void (*func)(TCGv_i64, TCGv_i32))
4663 {
4664     TCGv_i64 dst;
4665     TCGv_i32 src;
4666 
4667     if (gen_trap_ifnofpu(dc)) {
4668         return true;
4669     }
4670 
4671     dst = tcg_temp_new_i64();
4672     src = gen_load_fpr_F(dc, a->rs);
4673     func(dst, src);
4674     gen_store_fpr_D(dc, a->rd, dst);
4675     return advance_pc(dc);
4676 }
4677 
4678 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4679 
4680 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4681                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4682 {
4683     TCGv_i64 dst;
4684     TCGv_i32 src;
4685 
4686     if (gen_trap_ifnofpu(dc)) {
4687         return true;
4688     }
4689 
4690     dst = tcg_temp_new_i64();
4691     src = gen_load_fpr_F(dc, a->rs);
4692     func(dst, tcg_env, src);
4693     gen_store_fpr_D(dc, a->rd, dst);
4694     return advance_pc(dc);
4695 }
4696 
4697 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4698 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4699 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4700 
4701 static bool do_qq(DisasContext *dc, arg_r_r *a,
4702                   void (*func)(TCGv_i128, TCGv_i128))
4703 {
4704     TCGv_i128 t;
4705 
4706     if (gen_trap_ifnofpu(dc)) {
4707         return true;
4708     }
4709     if (gen_trap_float128(dc)) {
4710         return true;
4711     }
4712 
4713     gen_op_clear_ieee_excp_and_FTT();
4714     t = gen_load_fpr_Q(dc, a->rs);
4715     func(t, t);
4716     gen_store_fpr_Q(dc, a->rd, t);
4717     return advance_pc(dc);
4718 }
4719 
4720 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4721 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4722 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4723 
4724 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4725                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4726 {
4727     TCGv_i128 t;
4728 
4729     if (gen_trap_ifnofpu(dc)) {
4730         return true;
4731     }
4732     if (gen_trap_float128(dc)) {
4733         return true;
4734     }
4735 
4736     t = gen_load_fpr_Q(dc, a->rs);
4737     func(t, tcg_env, t);
4738     gen_store_fpr_Q(dc, a->rd, t);
4739     return advance_pc(dc);
4740 }
4741 
4742 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4743 
4744 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4745                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4746 {
4747     TCGv_i128 src;
4748     TCGv_i32 dst;
4749 
4750     if (gen_trap_ifnofpu(dc)) {
4751         return true;
4752     }
4753     if (gen_trap_float128(dc)) {
4754         return true;
4755     }
4756 
4757     src = gen_load_fpr_Q(dc, a->rs);
4758     dst = tcg_temp_new_i32();
4759     func(dst, tcg_env, src);
4760     gen_store_fpr_F(dc, a->rd, dst);
4761     return advance_pc(dc);
4762 }
4763 
4764 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4765 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4766 
4767 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4768                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4769 {
4770     TCGv_i128 src;
4771     TCGv_i64 dst;
4772 
4773     if (gen_trap_ifnofpu(dc)) {
4774         return true;
4775     }
4776     if (gen_trap_float128(dc)) {
4777         return true;
4778     }
4779 
4780     src = gen_load_fpr_Q(dc, a->rs);
4781     dst = tcg_temp_new_i64();
4782     func(dst, tcg_env, src);
4783     gen_store_fpr_D(dc, a->rd, dst);
4784     return advance_pc(dc);
4785 }
4786 
4787 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4788 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4789 
4790 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4791                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4792 {
4793     TCGv_i32 src;
4794     TCGv_i128 dst;
4795 
4796     if (gen_trap_ifnofpu(dc)) {
4797         return true;
4798     }
4799     if (gen_trap_float128(dc)) {
4800         return true;
4801     }
4802 
4803     src = gen_load_fpr_F(dc, a->rs);
4804     dst = tcg_temp_new_i128();
4805     func(dst, tcg_env, src);
4806     gen_store_fpr_Q(dc, a->rd, dst);
4807     return advance_pc(dc);
4808 }
4809 
4810 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4811 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4812 
4813 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4814                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4815 {
4816     TCGv_i64 src;
4817     TCGv_i128 dst;
4818 
4819     if (gen_trap_ifnofpu(dc)) {
4820         return true;
4821     }
4822     if (gen_trap_float128(dc)) {
4823         return true;
4824     }
4825 
4826     src = gen_load_fpr_D(dc, a->rs);
4827     dst = tcg_temp_new_i128();
4828     func(dst, tcg_env, src);
4829     gen_store_fpr_Q(dc, a->rd, dst);
4830     return advance_pc(dc);
4831 }
4832 
4833 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4834 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4835 
4836 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4837                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4838 {
4839     TCGv_i32 src1, src2;
4840 
4841     if (gen_trap_ifnofpu(dc)) {
4842         return true;
4843     }
4844 
4845     src1 = gen_load_fpr_F(dc, a->rs1);
4846     src2 = gen_load_fpr_F(dc, a->rs2);
4847     func(src1, src1, src2);
4848     gen_store_fpr_F(dc, a->rd, src1);
4849     return advance_pc(dc);
4850 }
4851 
4852 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4853 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4854 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4855 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4856 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4857 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4858 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4859 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4860 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4861 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4862 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4863 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4864 
4865 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4866 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4867 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4868 
4869 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4870 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4871 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4872 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4873 
4874 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4875                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4876 {
4877     TCGv_i32 src1, src2;
4878 
4879     if (gen_trap_ifnofpu(dc)) {
4880         return true;
4881     }
4882 
4883     src1 = gen_load_fpr_F(dc, a->rs1);
4884     src2 = gen_load_fpr_F(dc, a->rs2);
4885     func(src1, tcg_env, src1, src2);
4886     gen_store_fpr_F(dc, a->rd, src1);
4887     return advance_pc(dc);
4888 }
4889 
4890 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4891 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4892 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4893 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4894 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
4895 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4896 
4897 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4898                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4899 {
4900     TCGv_i64 dst;
4901     TCGv_i32 src1, src2;
4902 
4903     if (gen_trap_ifnofpu(dc)) {
4904         return true;
4905     }
4906 
4907     dst = tcg_temp_new_i64();
4908     src1 = gen_load_fpr_F(dc, a->rs1);
4909     src2 = gen_load_fpr_F(dc, a->rs2);
4910     func(dst, src1, src2);
4911     gen_store_fpr_D(dc, a->rd, dst);
4912     return advance_pc(dc);
4913 }
4914 
4915 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
4916 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
4917 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
4918 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
4919 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4920 
4921 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4922                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4923 {
4924     TCGv_i64 dst, src2;
4925     TCGv_i32 src1;
4926 
4927     if (gen_trap_ifnofpu(dc)) {
4928         return true;
4929     }
4930 
4931     dst = tcg_temp_new_i64();
4932     src1 = gen_load_fpr_F(dc, a->rs1);
4933     src2 = gen_load_fpr_D(dc, a->rs2);
4934     func(dst, src1, src2);
4935     gen_store_fpr_D(dc, a->rd, dst);
4936     return advance_pc(dc);
4937 }
4938 
4939 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4940 
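/*
 * Expand a VIS partitioned op with the generic vector expander,
 * treating the double-precision register as a single 8-byte vector;
 * the trailing (8, 8) arguments are oprsz and maxsz.
 */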
4941 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
4942                         void (*func)(unsigned, uint32_t, uint32_t,
4943                                      uint32_t, uint32_t, uint32_t))
4944 {
4945     if (gen_trap_ifnofpu(dc)) {
4946         return true;
4947     }
4948 
4949     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
4950          gen_offset_fpr_D(a->rs2), 8, 8);
4951     return advance_pc(dc);
4952 }
4953 
4954 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
4955 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
4956 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
4957 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
4958 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
4959 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
4960 
4961 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
4962 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
4963 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
4964 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
4965 
4966 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
4967 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
4968 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
4969 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
4970 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
4971 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
4972 
4973 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4974                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4975 {
4976     TCGv_i64 dst, src1, src2;
4977 
4978     if (gen_trap_ifnofpu(dc)) {
4979         return true;
4980     }
4981 
4982     dst = tcg_temp_new_i64();
4983     src1 = gen_load_fpr_D(dc, a->rs1);
4984     src2 = gen_load_fpr_D(dc, a->rs2);
4985     func(dst, src1, src2);
4986     gen_store_fpr_D(dc, a->rd, dst);
4987     return advance_pc(dc);
4988 }
4989 
4990 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4991 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4992 
4993 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4994 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4995 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4996 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4997 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4998 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4999 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5000 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5001 
5002 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5003 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
5004 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5005 
5006 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5007 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5008 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5009 
5010 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5011 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5012 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5013 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5014 
5015 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5016                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5017 {
5018     TCGv_i64 src1, src2;
5019     TCGv dst;
5020 
5021     if (gen_trap_ifnofpu(dc)) {
5022         return true;
5023     }
5024 
5025     dst = gen_dest_gpr(dc, a->rd);
5026     src1 = gen_load_fpr_D(dc, a->rs1);
5027     src2 = gen_load_fpr_D(dc, a->rs2);
5028     func(dst, src1, src2);
5029     gen_store_gpr(dc, a->rd, dst);
5030     return advance_pc(dc);
5031 }
5032 
5033 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5034 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5035 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5036 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5037 
5038 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5039 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5040 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5041 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5042 
5043 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5044 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5045 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5046 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5047 
5048 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5049                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5050 {
5051     TCGv_i64 dst, src1, src2;
5052 
5053     if (gen_trap_ifnofpu(dc)) {
5054         return true;
5055     }
5056 
5057     dst = tcg_temp_new_i64();
5058     src1 = gen_load_fpr_D(dc, a->rs1);
5059     src2 = gen_load_fpr_D(dc, a->rs2);
5060     func(dst, tcg_env, src1, src2);
5061     gen_store_fpr_D(dc, a->rd, dst);
5062     return advance_pc(dc);
5063 }
5064 
5065 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5066 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5067 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5068 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5069 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5070 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5071 
5072 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5073 {
5074     TCGv_i64 dst;
5075     TCGv_i32 src1, src2;
5076 
5077     if (gen_trap_ifnofpu(dc)) {
5078         return true;
5079     }
5080     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5081         return raise_unimpfpop(dc);
5082     }
5083 
5084     dst = tcg_temp_new_i64();
5085     src1 = gen_load_fpr_F(dc, a->rs1);
5086     src2 = gen_load_fpr_F(dc, a->rs2);
5087     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5088     gen_store_fpr_D(dc, a->rd, dst);
5089     return advance_pc(dc);
5090 }
5091 
5092 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5093 {
5094     TCGv_i64 dst;
5095     TCGv_i32 src1, src2;
5096 
5097     if (!avail_VIS3(dc)) {
5098         return false;
5099     }
5100     if (gen_trap_ifnofpu(dc)) {
5101         return true;
5102     }
5103     dst = tcg_temp_new_i64();
5104     src1 = gen_load_fpr_F(dc, a->rs1);
5105     src2 = gen_load_fpr_F(dc, a->rs2);
5106     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5107     gen_store_fpr_D(dc, a->rd, dst);
5108     return advance_pc(dc);
5109 }
5110 
5111 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5112                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5113 {
5114     TCGv_i32 dst, src1, src2, src3;
5115 
5116     if (gen_trap_ifnofpu(dc)) {
5117         return true;
5118     }
5119 
5120     src1 = gen_load_fpr_F(dc, a->rs1);
5121     src2 = gen_load_fpr_F(dc, a->rs2);
5122     src3 = gen_load_fpr_F(dc, a->rs3);
5123     dst = tcg_temp_new_i32();
5124     func(dst, src1, src2, src3);
5125     gen_store_fpr_F(dc, a->rd, dst);
5126     return advance_pc(dc);
5127 }
5128 
5129 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5130 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5131 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5132 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5133 
5134 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5135                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5136 {
5137     TCGv_i64 dst, src1, src2, src3;
5138 
5139     if (gen_trap_ifnofpu(dc)) {
5140         return true;
5141     }
5142 
5143     dst  = tcg_temp_new_i64();
5144     src1 = gen_load_fpr_D(dc, a->rs1);
5145     src2 = gen_load_fpr_D(dc, a->rs2);
5146     src3 = gen_load_fpr_D(dc, a->rs3);
5147     func(dst, src1, src2, src3);
5148     gen_store_fpr_D(dc, a->rd, dst);
5149     return advance_pc(dc);
5150 }
5151 
5152 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5153 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5154 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5155 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5156 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5157 
5158 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5159                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5160 {
5161     TCGv_i128 src1, src2;
5162 
5163     if (gen_trap_ifnofpu(dc)) {
5164         return true;
5165     }
5166     if (gen_trap_float128(dc)) {
5167         return true;
5168     }
5169 
5170     src1 = gen_load_fpr_Q(dc, a->rs1);
5171     src2 = gen_load_fpr_Q(dc, a->rs2);
5172     func(src1, tcg_env, src1, src2);
5173     gen_store_fpr_Q(dc, a->rd, src1);
5174     return advance_pc(dc);
5175 }
5176 
5177 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5178 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5179 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5180 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5181 
5182 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5183 {
5184     TCGv_i64 src1, src2;
5185     TCGv_i128 dst;
5186 
5187     if (gen_trap_ifnofpu(dc)) {
5188         return true;
5189     }
5190     if (gen_trap_float128(dc)) {
5191         return true;
5192     }
5193 
5194     src1 = gen_load_fpr_D(dc, a->rs1);
5195     src2 = gen_load_fpr_D(dc, a->rs2);
5196     dst = tcg_temp_new_i128();
5197     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5198     gen_store_fpr_Q(dc, a->rd, dst);
5199     return advance_pc(dc);
5200 }
5201 
5202 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5203                      void (*func)(DisasContext *, DisasCompare *, int, int))
5204 {
5205     DisasCompare cmp;
5206 
5207     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5208         return false;
5209     }
5210     if (gen_trap_ifnofpu(dc)) {
5211         return true;
5212     }
5213     if (is_128 && gen_trap_float128(dc)) {
5214         return true;
5215     }
5216 
5217     gen_op_clear_ieee_excp_and_FTT();
5218     func(dc, &cmp, a->rd, a->rs2);
5219     return advance_pc(dc);
5220 }
5221 
5222 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5223 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5224 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5225 
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

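/* FMOV*fcc: move an fp register based on the fp condition codes. */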
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

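/*
 * FCMPs/FCMPEs: compare two singles into %fcc[cc]; the E variant
 * also signals an exception for quiet NaN operands.  Only %fcc0
 * exists on sparc32, so any other cc is illegal there.
 */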
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

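/* Ditto, for double operands. */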
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

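/* Ditto, for quad operands, which additionally require float128. */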
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

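/*
 * FLCMPs/FLCMPd (VIS3): lexicographic fp compare.  The helpers do
 * not take tcg_env, so they never raise fp exceptions.
 */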
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

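/*
 * Unpack tb->flags into the DisasContext: mmu index, fpu enable,
 * 32-bit address masking, and (in system mode) privilege level.
 */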
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * If we reach a page boundary, stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page.
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

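/*
 * Log pc/npc for restore_state_to_opc.  A misaligned npc holds one
 * of the special markers, which must be flattened to a value that
 * can be reconstructed at exception time.
 */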
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

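/*
 * Translate one insn; anything the decoder rejects raises an
 * illegal-instruction trap.  A control transfer leaves pc out of
 * sync with base.pc_next, which ends the TB.
 */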
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

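/*
 * Close out the TB: chain directly when both pc and npc are static,
 * otherwise write them back and exit; then emit the out-of-line
 * code for any exceptions queued from delay slots.
 */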
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

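/*
 * Allocate the TCG globals backing the guest registers; the
 * windowed regs %o/%l/%i are addressed indirectly via regwptr.
 */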
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}

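/*
 * Recover pc/npc at an exception point from the insn_start data.
 * A JUMP_PC npc is resolved here using the runtime 'cond' value.
 */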
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}