/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
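
/*
 * Worked example: GET_FIELD(insn, 0, 1) extracts the two most-significant
 * bits of the instruction word (the "op" field), while GET_FIELD_SP(insn,
 * 25, 29) extracts bits 29..25 as numbered in the architecture manual
 * (bit 0 being the least significant).
 */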

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

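/*
 * Single-precision registers are packed two per 64-bit fpr[] element,
 * with the even-numbered register in the upper half and the odd-numbered
 * register in the lower half.
 */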
static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

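/*
 * Return the icc (32-bit) carry.  On a 64-bit target the carry out of
 * bit 31 is kept in bit 32 of cpu_icc_C and must be extracted; on a
 * 32-bit target cpu_icc_C already holds it in bit 0.
 */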
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
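    /*
     * Overflow: V = (N ^ src2) & ~(src1 ^ src2), i.e. the operands had
     * the same sign but the result's sign differs.  The src1 ^ src2
     * term is computed into cpu_cc_Z and reused below.
     */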
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
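    /* sub2 leaves 0 or -1 (a borrow) in cpu_cc_C; negate it to a 0/1 carry. */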
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

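/*
 * MULScc: one step of a 32-bit shift-and-add multiply.  %y holds the
 * multiplier: if its low bit is clear, the addend is forced to zero.
 * %y then shifts right, receiving src1's low bit, while the running
 * sum in src1 shifts right with (N ^ V) entering at bit 31 before
 * the add.
 */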
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

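    /* The helper returns the 32-bit quotient in the low half of t64
       and the overflow (V) flag in bit 32, unpacked below. */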
    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

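    /* For each 16-bit lane: widen, add, then clamp to the int16 range. */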
    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

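    /*
     * Saturation value: a wrapped-negative sum means positive overflow,
     * so choose INT32_MAX; otherwise INT32_MIN.  Select it when V < 0.
     */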
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

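/* fmul8x16al uses the low halfword of rs2; fmul8x16au uses the high one. */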
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

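    /*
     * Compute (src1 + src2 + 1) >> 1 per lane without widening: shift
     * each operand right first, then add back a rounding bit whenever
     * either discarded low bit was set.
     */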
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* Call this function before using the condition register, as it may
   have been set for a jump. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

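    /* Branch to a delayed unaligned-access trap if addr has any mask bits set. */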
    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0              NE
         *   cc_Z && !((N ^ V) < 0)           EQ
         *   cc_Z & ~((N ^ V) >> (TLB - 1))   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1705 
1706 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1707 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1708                               TCGv_i32 asi, TCGv_i32 mop)
1709 {
1710     g_assert_not_reached();
1711 }
1712 
1713 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1714                               TCGv_i32 asi, TCGv_i32 mop)
1715 {
1716     g_assert_not_reached();
1717 }
1718 #endif
1719 
1720 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1721 {
1722     switch (da->type) {
1723     case GET_ASI_EXCP:
1724         break;
1725     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1726         gen_exception(dc, TT_ILL_INSN);
1727         break;
1728     case GET_ASI_DIRECT:
1729         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1730         break;
1731 
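    /* Instruction-space loads (sparc32 system mode) go via a helper. */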
1732     case GET_ASI_CODE:
1733 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1734         {
1735             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1736             TCGv_i64 t64 = tcg_temp_new_i64();
1737 
1738             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1739             tcg_gen_trunc_i64_tl(dst, t64);
1740         }
1741         break;
1742 #else
1743         g_assert_not_reached();
1744 #endif
1745 
1746     default:
1747         {
1748             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1749             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1750 
1751             save_state(dc);
1752 #ifdef TARGET_SPARC64
1753             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1754 #else
1755             {
1756                 TCGv_i64 t64 = tcg_temp_new_i64();
1757                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1758                 tcg_gen_trunc_i64_tl(dst, t64);
1759             }
1760 #endif
1761         }
1762         break;
1763     }
1764 }
1765 
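/*
 * Generate an ASI store from SRC.  Direct ASIs become an inline guest
 * store; the helper fallback must end the TB, since a write to an MMU
 * register may change the page mappings.
 */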
1766 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1767 {
1768     switch (da->type) {
1769     case GET_ASI_EXCP:
1770         break;
1771 
1772     case GET_ASI_DTWINX: /* Reserved for stda.  */
1773         if (TARGET_LONG_BITS == 32) {
1774             gen_exception(dc, TT_ILL_INSN);
1775             break;
1776         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1777             /* Pre-OpenSPARC CPUs don't have these */
1778             gen_exception(dc, TT_ILL_INSN);
1779             break;
1780         }
1781         /* In OpenSPARC T1+ CPUs, TWINX ASIs used in stores are the ST_BLKINIT_ ASIs */
1782         /* fall through */
1783 
1784     case GET_ASI_DIRECT:
1785         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1786         break;
1787 
1788     case GET_ASI_BCOPY:
1789         assert(TARGET_LONG_BITS == 32);
1790         /*
1791          * Copy 32 bytes from the address in SRC to ADDR.
1792          *
1793          * From Ross RT625 hyperSPARC manual, section 4.6:
1794          * "Block Copy and Block Fill will work only on cache line boundaries."
1795          *
1796          * It does not specify whether an unaligned address is truncated or trapped.
1797          * Previous qemu behaviour was to truncate to 4-byte alignment, which
1798          * is obviously wrong.  The only place I can see this used is in the
1799          * Linux kernel which begins with page alignment, advancing by 32,
1800          * so is always aligned.  Assume truncation as the simpler option.
1801          *
1802          * Since the loads and stores are paired, allow the copy to happen
1803          * in the host endianness.  The copy need not be atomic.
1804          */
1805         {
1806             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1807             TCGv saddr = tcg_temp_new();
1808             TCGv daddr = tcg_temp_new();
1809             TCGv_i128 tmp = tcg_temp_new_i128();
1810 
1811             tcg_gen_andi_tl(saddr, src, -32);
1812             tcg_gen_andi_tl(daddr, addr, -32);
1813             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1814             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1815             tcg_gen_addi_tl(saddr, saddr, 16);
1816             tcg_gen_addi_tl(daddr, daddr, 16);
1817             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1818             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1819         }
1820         break;
1821 
1822     default:
1823         {
1824             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1825             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1826 
1827             save_state(dc);
1828 #ifdef TARGET_SPARC64
1829             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1830 #else
1831             {
1832                 TCGv_i64 t64 = tcg_temp_new_i64();
1833                 tcg_gen_extu_tl_i64(t64, src);
1834                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1835             }
1836 #endif
1837 
1838             /* A write to a TLB register may alter page maps.  End the TB. */
1839             dc->npc = DYNAMIC_PC;
1840         }
1841         break;
1842     }
1843 }
1844 
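/* SWAPA: atomically exchange a register with memory via the given ASI. */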
1845 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1846                          TCGv dst, TCGv src, TCGv addr)
1847 {
1848     switch (da->type) {
1849     case GET_ASI_EXCP:
1850         break;
1851     case GET_ASI_DIRECT:
1852         tcg_gen_atomic_xchg_tl(dst, addr, src,
1853                                da->mem_idx, da->memop | MO_ALIGN);
1854         break;
1855     default:
1856         /* ??? Should be DAE_invalid_asi.  */
1857         gen_exception(dc, TT_DATA_ACCESS);
1858         break;
1859     }
1860 }
1861 
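/* CASA/CASXA: compare-and-swap via the given ASI. */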
1862 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1863                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1864 {
1865     switch (da->type) {
1866     case GET_ASI_EXCP:
1867         return;
1868     case GET_ASI_DIRECT:
1869         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1870                                   da->mem_idx, da->memop | MO_ALIGN);
1871         break;
1872     default:
1873         /* ??? Should be DAE_invalid_asi.  */
1874         gen_exception(dc, TT_DATA_ACCESS);
1875         break;
1876     }
1877 }
1878 
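/* LDSTUBA: atomically load a byte and set the location to 0xff. */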
1879 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1880 {
1881     switch (da->type) {
1882     case GET_ASI_EXCP:
1883         break;
1884     case GET_ASI_DIRECT:
1885         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1886                                da->mem_idx, MO_UB);
1887         break;
1888     default:
1889         /* ??? In theory, this should raise DAE_invalid_asi.
1890            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1891         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1892             gen_helper_exit_atomic(tcg_env);
1893         } else {
1894             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1895             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1896             TCGv_i64 s64, t64;
1897 
1898             save_state(dc);
1899             t64 = tcg_temp_new_i64();
1900             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1901 
1902             s64 = tcg_constant_i64(0xff);
1903             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1904 
1905             tcg_gen_trunc_i64_tl(dst, t64);
1906 
1907             /* End the TB.  */
1908             dc->npc = DYNAMIC_PC;
1909         }
1910         break;
1911     }
1912 }
1913 
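/*
 * Generate an FP load via an ASI (LDFA/LDDFA/LDQFA, block loads, and
 * the short floating-point ASIs).  ORIG_SIZE is the operand size as
 * encoded in the insn, before any 128-bit access is split in two.
 */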
1914 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1915                         TCGv addr, int rd)
1916 {
1917     MemOp memop = da->memop;
1918     MemOp size = memop & MO_SIZE;
1919     TCGv_i32 d32;
1920     TCGv_i64 d64, l64;
1921     TCGv addr_tmp;
1922 
1923     /* TODO: Use 128-bit load/store below. */
1924     if (size == MO_128) {
1925         memop = (memop & ~MO_SIZE) | MO_64;
1926     }
1927 
1928     switch (da->type) {
1929     case GET_ASI_EXCP:
1930         break;
1931 
1932     case GET_ASI_DIRECT:
1933         memop |= MO_ALIGN_4;
1934         switch (size) {
1935         case MO_32:
1936             d32 = tcg_temp_new_i32();
1937             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1938             gen_store_fpr_F(dc, rd, d32);
1939             break;
1940 
1941         case MO_64:
1942             d64 = tcg_temp_new_i64();
1943             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1944             gen_store_fpr_D(dc, rd, d64);
1945             break;
1946 
1947         case MO_128:
1948             d64 = tcg_temp_new_i64();
1949             l64 = tcg_temp_new_i64();
1950             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1951             addr_tmp = tcg_temp_new();
1952             tcg_gen_addi_tl(addr_tmp, addr, 8);
1953             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
1954             gen_store_fpr_D(dc, rd, d64);
1955             gen_store_fpr_D(dc, rd + 2, l64);
1956             break;
1957         default:
1958             g_assert_not_reached();
1959         }
1960         break;
1961 
1962     case GET_ASI_BLOCK:
1963         /* Valid for lddfa on aligned registers only.  */
1964         if (orig_size == MO_64 && (rd & 7) == 0) {
1965             /* The first operation checks required alignment.  */
1966             addr_tmp = tcg_temp_new();
1967             d64 = tcg_temp_new_i64();
1968             for (int i = 0; ; ++i) {
1969                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
1970                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1971                 gen_store_fpr_D(dc, rd + 2 * i, d64);
1972                 if (i == 7) {
1973                     break;
1974                 }
1975                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1976                 addr = addr_tmp;
1977             }
1978         } else {
1979             gen_exception(dc, TT_ILL_INSN);
1980         }
1981         break;
1982 
1983     case GET_ASI_SHORT:
1984         /* Valid for lddfa only.  */
1985         if (orig_size == MO_64) {
1986             d64 = tcg_temp_new_i64();
1987             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
1988             gen_store_fpr_D(dc, rd, d64);
1989         } else {
1990             gen_exception(dc, TT_ILL_INSN);
1991         }
1992         break;
1993 
1994     default:
1995         {
1996             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1997             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1998 
1999             save_state(dc);
2000             /* According to the table in the UA2011 manual, the only
2001                other asis that are valid for ldfa/lddfa/ldqfa are
2002                the NO_FAULT asis.  We still need a helper for these,
2003                but we can just use the integer asi helper for them.  */
2004             switch (size) {
2005             case MO_32:
2006                 d64 = tcg_temp_new_i64();
2007                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2008                 d32 = tcg_temp_new_i32();
2009                 tcg_gen_extrl_i64_i32(d32, d64);
2010                 gen_store_fpr_F(dc, rd, d32);
2011                 break;
2012             case MO_64:
2013                 d64 = tcg_temp_new_i64();
2014                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2015                 gen_store_fpr_D(dc, rd, d64);
2016                 break;
2017             case MO_128:
2018                 d64 = tcg_temp_new_i64();
2019                 l64 = tcg_temp_new_i64();
2020                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2021                 addr_tmp = tcg_temp_new();
2022                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2023                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2024                 gen_store_fpr_D(dc, rd, d64);
2025                 gen_store_fpr_D(dc, rd + 2, l64);
2026                 break;
2027             default:
2028                 g_assert_not_reached();
2029             }
2030         }
2031         break;
2032     }
2033 }
2034 
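/* As above, but for the FP stores: STFA/STDFA/STQFA and block stores. */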
2035 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2036                         TCGv addr, int rd)
2037 {
2038     MemOp memop = da->memop;
2039     MemOp size = memop & MO_SIZE;
2040     TCGv_i32 d32;
2041     TCGv_i64 d64;
2042     TCGv addr_tmp;
2043 
2044     /* TODO: Use 128-bit load/store below. */
2045     if (size == MO_128) {
2046         memop = (memop & ~MO_SIZE) | MO_64;
2047     }
2048 
2049     switch (da->type) {
2050     case GET_ASI_EXCP:
2051         break;
2052 
2053     case GET_ASI_DIRECT:
2054         memop |= MO_ALIGN_4;
2055         switch (size) {
2056         case MO_32:
2057             d32 = gen_load_fpr_F(dc, rd);
2058             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2059             break;
2060         case MO_64:
2061             d64 = gen_load_fpr_D(dc, rd);
2062             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2063             break;
2064         case MO_128:
2065             /* Only 4-byte alignment required.  However, it is legal for the
2066                cpu to signal the alignment fault, and the OS trap handler is
2067                required to fix it up.  Requiring 16-byte alignment here avoids
2068                having to probe the second page before performing the first
2069                write.  */
2070             d64 = gen_load_fpr_D(dc, rd);
2071             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2072             addr_tmp = tcg_temp_new();
2073             tcg_gen_addi_tl(addr_tmp, addr, 8);
2074             d64 = gen_load_fpr_D(dc, rd + 2);
2075             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2076             break;
2077         default:
2078             g_assert_not_reached();
2079         }
2080         break;
2081 
2082     case GET_ASI_BLOCK:
2083         /* Valid for stdfa on aligned registers only.  */
2084         if (orig_size == MO_64 && (rd & 7) == 0) {
2085             /* The first operation checks required alignment.  */
2086             addr_tmp = tcg_temp_new();
2087             for (int i = 0; ; ++i) {
2088                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2089                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2090                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2091                 if (i == 7) {
2092                     break;
2093                 }
2094                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2095                 addr = addr_tmp;
2096             }
2097         } else {
2098             gen_exception(dc, TT_ILL_INSN);
2099         }
2100         break;
2101 
2102     case GET_ASI_SHORT:
2103         /* Valid for stdfa only.  */
2104         if (orig_size == MO_64) {
2105             d64 = gen_load_fpr_D(dc, rd);
2106             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2107         } else {
2108             gen_exception(dc, TT_ILL_INSN);
2109         }
2110         break;
2111 
2112     default:
2113         /* According to the table in the UA2011 manual, the only
2114            other asis that are valid for stfa/stdfa/stqfa are
2115            the PST* asis, which aren't currently handled.  */
2116         gen_exception(dc, TT_ILL_INSN);
2117         break;
2118     }
2119 }
2120 
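/*
 * LDDA: load a doubleword into an even/odd register pair.  On sparc64
 * the TWINX ASIs instead load 128 bits into two 64-bit registers.
 */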
2121 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2122 {
2123     TCGv hi = gen_dest_gpr(dc, rd);
2124     TCGv lo = gen_dest_gpr(dc, rd + 1);
2125 
2126     switch (da->type) {
2127     case GET_ASI_EXCP:
2128         return;
2129 
2130     case GET_ASI_DTWINX:
2131 #ifdef TARGET_SPARC64
2132         {
2133             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2134             TCGv_i128 t = tcg_temp_new_i128();
2135 
2136             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2137             /*
2138              * Note that LE twinx acts as if each 64-bit register result is
2139              * byte swapped.  We perform one 128-bit LE load, so must swap
2140              * the order of the writebacks.
2141              */
2142             if ((mop & MO_BSWAP) == MO_TE) {
2143                 tcg_gen_extr_i128_i64(lo, hi, t);
2144             } else {
2145                 tcg_gen_extr_i128_i64(hi, lo, t);
2146             }
2147         }
2148         break;
2149 #else
2150         g_assert_not_reached();
2151 #endif
2152 
2153     case GET_ASI_DIRECT:
2154         {
2155             TCGv_i64 tmp = tcg_temp_new_i64();
2156 
2157             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2158 
2159             /* Note that LE ldda acts as if each 32-bit register
2160                result is byte swapped.  Having just performed one
2161                64-bit bswap, we now need to swap the writebacks.  */
2162             if ((da->memop & MO_BSWAP) == MO_TE) {
2163                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2164             } else {
2165                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2166             }
2167         }
2168         break;
2169 
2170     case GET_ASI_CODE:
2171 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2172         {
2173             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2174             TCGv_i64 tmp = tcg_temp_new_i64();
2175 
2176             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2177 
2178             /* See above.  */
2179             if ((da->memop & MO_BSWAP) == MO_TE) {
2180                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2181             } else {
2182                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2183             }
2184         }
2185         break;
2186 #else
2187         g_assert_not_reached();
2188 #endif
2189 
2190     default:
2191         /* ??? In theory we've handled all of the ASIs that are valid
2192            for ldda, and this should raise DAE_invalid_asi.  However,
2193            real hardware allows others.  This can be seen with e.g.
2194            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2195         {
2196             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2197             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2198             TCGv_i64 tmp = tcg_temp_new_i64();
2199 
2200             save_state(dc);
2201             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2202 
2203             /* See above.  */
2204             if ((da->memop & MO_BSWAP) == MO_TE) {
2205                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2206             } else {
2207                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2208             }
2209         }
2210         break;
2211     }
2212 
2213     gen_store_gpr(dc, rd, hi);
2214     gen_store_gpr(dc, rd + 1, lo);
2215 }
2216 
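/* STDA: store a register pair, or two 64-bit registers for TWINX. */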
2217 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2218 {
2219     TCGv hi = gen_load_gpr(dc, rd);
2220     TCGv lo = gen_load_gpr(dc, rd + 1);
2221 
2222     switch (da->type) {
2223     case GET_ASI_EXCP:
2224         break;
2225 
2226     case GET_ASI_DTWINX:
2227 #ifdef TARGET_SPARC64
2228         {
2229             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2230             TCGv_i128 t = tcg_temp_new_i128();
2231 
2232             /*
2233              * Note that LE twinx acts as if each 64-bit register result is
2234              * byte swapped.  We perform one 128-bit LE store, so must swap
2235              * the order of the construction.
2236              */
2237             if ((mop & MO_BSWAP) == MO_TE) {
2238                 tcg_gen_concat_i64_i128(t, lo, hi);
2239             } else {
2240                 tcg_gen_concat_i64_i128(t, hi, lo);
2241             }
2242             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2243         }
2244         break;
2245 #else
2246         g_assert_not_reached();
2247 #endif
2248 
2249     case GET_ASI_DIRECT:
2250         {
2251             TCGv_i64 t64 = tcg_temp_new_i64();
2252 
2253             /* Note that LE stda acts as if each 32-bit register result is
2254                byte swapped.  We will perform one 64-bit LE store, so now
2255                we must swap the order of the construction.  */
2256             if ((da->memop & MO_BSWAP) == MO_TE) {
2257                 tcg_gen_concat_tl_i64(t64, lo, hi);
2258             } else {
2259                 tcg_gen_concat_tl_i64(t64, hi, lo);
2260             }
2261             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2262         }
2263         break;
2264 
2265     case GET_ASI_BFILL:
2266         assert(TARGET_LONG_BITS == 32);
2267         /*
2268          * Store 32 bytes of [rd:rd+1] to ADDR.
2269          * See the comments for GET_ASI_BCOPY above.
2270          */
2271         {
2272             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2273             TCGv_i64 t8 = tcg_temp_new_i64();
2274             TCGv_i128 t16 = tcg_temp_new_i128();
2275             TCGv daddr = tcg_temp_new();
2276 
2277             tcg_gen_concat_tl_i64(t8, lo, hi);
2278             tcg_gen_concat_i64_i128(t16, t8, t8);
2279             tcg_gen_andi_tl(daddr, addr, -32);
2280             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2281             tcg_gen_addi_tl(daddr, daddr, 16);
2282             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2283         }
2284         break;
2285 
2286     default:
2287         /* ??? In theory we've handled all of the ASIs that are valid
2288            for stda, and this should raise DAE_invalid_asi.  */
2289         {
2290             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2291             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2292             TCGv_i64 t64 = tcg_temp_new_i64();
2293 
2294             /* See above.  */
2295             if ((da->memop & MO_BSWAP) == MO_TE) {
2296                 tcg_gen_concat_tl_i64(t64, lo, hi);
2297             } else {
2298                 tcg_gen_concat_tl_i64(t64, hi, lo);
2299             }
2300 
2301             save_state(dc);
2302             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2303         }
2304         break;
2305     }
2306 }
2307 
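/* FMOVcc: conditional moves of single/double/quad FP registers. */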
2308 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2309 {
2310 #ifdef TARGET_SPARC64
2311     TCGv_i32 c32, zero, dst, s1, s2;
2312     TCGv_i64 c64 = tcg_temp_new_i64();
2313 
2314     /* We have two choices here: extend the 32 bit data and use movcond_i64,
2315        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2316        the latter.  */
2317     c32 = tcg_temp_new_i32();
2318     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2319     tcg_gen_extrl_i64_i32(c32, c64);
2320 
2321     s1 = gen_load_fpr_F(dc, rs);
2322     s2 = gen_load_fpr_F(dc, rd);
2323     dst = tcg_temp_new_i32();
2324     zero = tcg_constant_i32(0);
2325 
2326     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2327 
2328     gen_store_fpr_F(dc, rd, dst);
2329 #else
2330     qemu_build_not_reached();
2331 #endif
2332 }
2333 
2334 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2335 {
2336 #ifdef TARGET_SPARC64
2337     TCGv_i64 dst = tcg_temp_new_i64();
2338     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2339                         gen_load_fpr_D(dc, rs),
2340                         gen_load_fpr_D(dc, rd));
2341     gen_store_fpr_D(dc, rd, dst);
2342 #else
2343     qemu_build_not_reached();
2344 #endif
2345 }
2346 
2347 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2348 {
2349 #ifdef TARGET_SPARC64
2350     TCGv c2 = tcg_constant_tl(cmp->c2);
2351     TCGv_i64 h = tcg_temp_new_i64();
2352     TCGv_i64 l = tcg_temp_new_i64();
2353 
2354     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2355                         gen_load_fpr_D(dc, rs),
2356                         gen_load_fpr_D(dc, rd));
2357     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2358                         gen_load_fpr_D(dc, rs + 2),
2359                         gen_load_fpr_D(dc, rd + 2));
2360     gen_store_fpr_D(dc, rd, h);
2361     gen_store_fpr_D(dc, rd + 2, l);
2362 #else
2363     qemu_build_not_reached();
2364 #endif
2365 }
2366 
2367 #ifdef TARGET_SPARC64
2368 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2369 {
2370     TCGv_i32 r_tl = tcg_temp_new_i32();
2371 
2372     /* load env->tl into r_tl */
2373     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2374 
2375     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2376     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2377 
2378     /* calculate offset to current trap state from env->ts, reuse r_tl */
2379     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2380     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2381 
2382     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2383     {
2384         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2385         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2386         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2387     }
2388 }
2389 #endif
2390 
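/*
 * Decode a double- or quad-precision register number from its 5-bit
 * field.  On v9 the low bit of the field becomes bit 5 of the register
 * number, extending the range to %d32..%d62 (resp. %q32..%q60).
 */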
2391 static int extract_dfpreg(DisasContext *dc, int x)
2392 {
2393     int r = x & 0x1e;
2394 #ifdef TARGET_SPARC64
2395     r |= (x & 1) << 5;
2396 #endif
2397     return r;
2398 }
2399 
2400 static int extract_qfpreg(DisasContext *dc, int x)
2401 {
2402     int r = x & 0x1c;
2403 #ifdef TARGET_SPARC64
2404     r |= (x & 1) << 5;
2405 #endif
2406     return r;
2407 }
2408 
2409 /* Include the auto-generated decoder.  */
2410 #include "decode-insns.c.inc"
2411 
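/*
 * Bind a decodetree pattern NAME to implementation FUNC, gated on the
 * avail_* predicate for feature AVAIL.
 */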
2412 #define TRANS(NAME, AVAIL, FUNC, ...) \
2413     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2414     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2415 
2416 #define avail_ALL(C)      true
2417 #ifdef TARGET_SPARC64
2418 # define avail_32(C)      false
2419 # define avail_ASR17(C)   false
2420 # define avail_CASA(C)    true
2421 # define avail_DIV(C)     true
2422 # define avail_MUL(C)     true
2423 # define avail_POWERDOWN(C) false
2424 # define avail_64(C)      true
2425 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2426 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2427 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2428 # define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
2429 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2430 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2431 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2432 # define avail_VIS3B(C)   avail_VIS3(C)
2433 #else
2434 # define avail_32(C)      true
2435 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2436 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2437 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2438 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2439 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2440 # define avail_64(C)      false
2441 # define avail_FMAF(C)    false
2442 # define avail_GL(C)      false
2443 # define avail_HYPV(C)    false
2444 # define avail_IMA(C)     false
2445 # define avail_VIS1(C)    false
2446 # define avail_VIS2(C)    false
2447 # define avail_VIS3(C)    false
2448 # define avail_VIS3B(C)   false
2449 #endif
2450 
2451 /* Default case for non-jump instructions. */
2452 static bool advance_pc(DisasContext *dc)
2453 {
2454     TCGLabel *l1;
2455 
2456     finishing_insn(dc);
2457 
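    /* Valid npc values are 4-aligned; low bits mark the special encodings. */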
2458     if (dc->npc & 3) {
2459         switch (dc->npc) {
2460         case DYNAMIC_PC:
2461         case DYNAMIC_PC_LOOKUP:
2462             dc->pc = dc->npc;
2463             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2464             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2465             break;
2466 
2467         case JUMP_PC:
2468             /* we can do a static jump */
2469             l1 = gen_new_label();
2470             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2471 
2472             /* jump not taken */
2473             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2474 
2475             /* jump taken */
2476             gen_set_label(l1);
2477             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2478 
2479             dc->base.is_jmp = DISAS_NORETURN;
2480             break;
2481 
2482         default:
2483             g_assert_not_reached();
2484         }
2485     } else {
2486         dc->pc = dc->npc;
2487         dc->npc = dc->npc + 4;
2488     }
2489     return true;
2490 }
2491 
2492 /*
2493  * Major opcodes 00 and 01 -- branches, call, and sethi
2494  */
2495 
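/*
 * Conditional branch with annulment.  ALWAYS and NEVER are resolved at
 * translation time; otherwise either emit a two-way goto_tb now (annul
 * case) or record the condition so the delay slot still executes.
 */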
2496 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2497                               bool annul, int disp)
2498 {
2499     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2500     target_ulong npc;
2501 
2502     finishing_insn(dc);
2503 
2504     if (cmp->cond == TCG_COND_ALWAYS) {
2505         if (annul) {
2506             dc->pc = dest;
2507             dc->npc = dest + 4;
2508         } else {
2509             gen_mov_pc_npc(dc);
2510             dc->npc = dest;
2511         }
2512         return true;
2513     }
2514 
2515     if (cmp->cond == TCG_COND_NEVER) {
2516         npc = dc->npc;
2517         if (npc & 3) {
2518             gen_mov_pc_npc(dc);
2519             if (annul) {
2520                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2521             }
2522             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2523         } else {
2524             dc->pc = npc + (annul ? 4 : 0);
2525             dc->npc = dc->pc + 4;
2526         }
2527         return true;
2528     }
2529 
2530     flush_cond(dc);
2531     npc = dc->npc;
2532 
2533     if (annul) {
2534         TCGLabel *l1 = gen_new_label();
2535 
2536         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2537         gen_goto_tb(dc, 0, npc, dest);
2538         gen_set_label(l1);
2539         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2540 
2541         dc->base.is_jmp = DISAS_NORETURN;
2542     } else {
2543         if (npc & 3) {
2544             switch (npc) {
2545             case DYNAMIC_PC:
2546             case DYNAMIC_PC_LOOKUP:
2547                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2548                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2549                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2550                                    cmp->c1, tcg_constant_tl(cmp->c2),
2551                                    tcg_constant_tl(dest), cpu_npc);
2552                 dc->pc = npc;
2553                 break;
2554             default:
2555                 g_assert_not_reached();
2556             }
2557         } else {
2558             dc->pc = npc;
2559             dc->npc = JUMP_PC;
2560             dc->jump = *cmp;
2561             dc->jump_pc[0] = dest;
2562             dc->jump_pc[1] = npc + 4;
2563 
2564             /* The condition for cpu_cond is always NE -- normalize. */
2565             if (cmp->cond == TCG_COND_NE) {
2566                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2567             } else {
2568                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2569             }
2570             dc->cpu_cond_live = true;
2571         }
2572     }
2573     return true;
2574 }
2575 
2576 static bool raise_priv(DisasContext *dc)
2577 {
2578     gen_exception(dc, TT_PRIV_INSN);
2579     return true;
2580 }
2581 
2582 static bool raise_unimpfpop(DisasContext *dc)
2583 {
2584     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2585     return true;
2586 }
2587 
2588 static bool gen_trap_float128(DisasContext *dc)
2589 {
2590     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2591         return false;
2592     }
2593     return raise_unimpfpop(dc);
2594 }
2595 
2596 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2597 {
2598     DisasCompare cmp;
2599 
2600     gen_compare(&cmp, a->cc, a->cond, dc);
2601     return advance_jump_cond(dc, &cmp, a->a, a->i);
2602 }
2603 
2604 TRANS(Bicc, ALL, do_bpcc, a)
2605 TRANS(BPcc,  64, do_bpcc, a)
2606 
2607 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2608 {
2609     DisasCompare cmp;
2610 
2611     if (gen_trap_ifnofpu(dc)) {
2612         return true;
2613     }
2614     gen_fcompare(&cmp, a->cc, a->cond);
2615     return advance_jump_cond(dc, &cmp, a->a, a->i);
2616 }
2617 
2618 TRANS(FBPfcc,  64, do_fbpfcc, a)
2619 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2620 
2621 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2622 {
2623     DisasCompare cmp;
2624 
2625     if (!avail_64(dc)) {
2626         return false;
2627     }
2628     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2629         return false;
2630     }
2631     return advance_jump_cond(dc, &cmp, a->a, a->i);
2632 }
2633 
2634 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2635 {
2636     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2637 
2638     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2639     gen_mov_pc_npc(dc);
2640     dc->npc = target;
2641     return true;
2642 }
2643 
2644 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2645 {
2646     /*
2647      * For sparc32, always generate the no-coprocessor exception.
2648      * For sparc64, always generate an illegal instruction trap.
2649      */
2650 #ifdef TARGET_SPARC64
2651     return false;
2652 #else
2653     gen_exception(dc, TT_NCP_INSN);
2654     return true;
2655 #endif
2656 }
2657 
2658 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2659 {
2660     /* Special-case %g0 because that's the canonical nop.  */
2661     if (a->rd) {
2662         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2663     }
2664     return advance_pc(dc);
2665 }
2666 
2667 /*
2668  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2669  */
2670 
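/*
 * Tcc: conditional software trap.  The trap number is rs1 plus rs2 or
 * the immediate, masked to the implemented trap range, plus TT_TRAP.
 */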
2671 static bool do_tcc(DisasContext *dc, int cond, int cc,
2672                    int rs1, bool imm, int rs2_or_imm)
2673 {
2674     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2675                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2676     DisasCompare cmp;
2677     TCGLabel *lab;
2678     TCGv_i32 trap;
2679 
2680     /* Trap never.  */
2681     if (cond == 0) {
2682         return advance_pc(dc);
2683     }
2684 
2685     /*
2686      * Immediate traps are the most common case.  Since this value is
2687      * live across the branch, it really pays to evaluate the constant.
2688      */
2689     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2690         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2691     } else {
2692         trap = tcg_temp_new_i32();
2693         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2694         if (imm) {
2695             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2696         } else {
2697             TCGv_i32 t2 = tcg_temp_new_i32();
2698             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2699             tcg_gen_add_i32(trap, trap, t2);
2700         }
2701         tcg_gen_andi_i32(trap, trap, mask);
2702         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2703     }
2704 
2705     finishing_insn(dc);
2706 
2707     /* Trap always.  */
2708     if (cond == 8) {
2709         save_state(dc);
2710         gen_helper_raise_exception(tcg_env, trap);
2711         dc->base.is_jmp = DISAS_NORETURN;
2712         return true;
2713     }
2714 
2715     /* Conditional trap.  */
2716     flush_cond(dc);
2717     lab = delay_exceptionv(dc, trap);
2718     gen_compare(&cmp, cc, cond, dc);
2719     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2720 
2721     return advance_pc(dc);
2722 }
2723 
2724 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2725 {
2726     if (avail_32(dc) && a->cc) {
2727         return false;
2728     }
2729     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2730 }
2731 
2732 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2733 {
2734     if (avail_64(dc)) {
2735         return false;
2736     }
2737     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2738 }
2739 
2740 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2741 {
2742     if (avail_32(dc)) {
2743         return false;
2744     }
2745     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2746 }
2747 
2748 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2749 {
2750     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2751     return advance_pc(dc);
2752 }
2753 
2754 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2755 {
2756     if (avail_32(dc)) {
2757         return false;
2758     }
2759     if (a->mmask) {
2760         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2761         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2762     }
2763     if (a->cmask) {
2764         /* For #Sync, etc, end the TB to recognize interrupts. */
2765         dc->base.is_jmp = DISAS_EXIT;
2766     }
2767     return advance_pc(dc);
2768 }
2769 
2770 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2771                           TCGv (*func)(DisasContext *, TCGv))
2772 {
2773     if (!priv) {
2774         return raise_priv(dc);
2775     }
2776     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2777     return advance_pc(dc);
2778 }
2779 
2780 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2781 {
2782     return cpu_y;
2783 }
2784 
2785 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2786 {
2787     /*
2788      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2789      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2790      * This insn matches after all other ASRs, so Leon3 ASR17 is handled first.
2791      */
2792     if (avail_64(dc) && a->rs1 != 0) {
2793         return false;
2794     }
2795     return do_rd_special(dc, true, a->rd, do_rdy);
2796 }
2797 
2798 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2799 {
2800     gen_helper_rdasr17(dst, tcg_env);
2801     return dst;
2802 }
2803 
2804 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2805 
2806 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2807 {
2808     gen_helper_rdccr(dst, tcg_env);
2809     return dst;
2810 }
2811 
2812 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2813 
2814 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2815 {
2816 #ifdef TARGET_SPARC64
2817     return tcg_constant_tl(dc->asi);
2818 #else
2819     qemu_build_not_reached();
2820 #endif
2821 }
2822 
2823 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2824 
2825 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2826 {
2827     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2828 
2829     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2830     if (translator_io_start(&dc->base)) {
2831         dc->base.is_jmp = DISAS_EXIT;
2832     }
2833     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2834                               tcg_constant_i32(dc->mem_idx));
2835     return dst;
2836 }
2837 
2838 /* TODO: non-priv access only allowed when enabled. */
2839 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2840 
2841 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2842 {
2843     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2844 }
2845 
2846 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2847 
2848 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2849 {
2850     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2851     return dst;
2852 }
2853 
2854 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2855 
2856 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2857 {
2858     gen_trap_ifnofpu(dc);
2859     return cpu_gsr;
2860 }
2861 
2862 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2863 
2864 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2865 {
2866     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2867     return dst;
2868 }
2869 
2870 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2871 
2872 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2873 {
2874     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2875     return dst;
2876 }
2877 
2878 /* TODO: non-priv access only allowed when enabled. */
2879 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2880 
2881 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2882 {
2883     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2884 
2885     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2886     if (translator_io_start(&dc->base)) {
2887         dc->base.is_jmp = DISAS_EXIT;
2888     }
2889     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2890                               tcg_constant_i32(dc->mem_idx));
2891     return dst;
2892 }
2893 
2894 /* TODO: non-priv access only allowed when enabled. */
2895 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2896 
2897 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2898 {
2899     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2900     return dst;
2901 }
2902 
2903 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2904 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2905 
2906 /*
2907  * UltraSPARC-T1 Strand status.
2908  * The HYPV check may not be sufficient: UA2005 & UA2007 describe
2909  * this ASR as implementation dependent.
2910  */
2911 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2912 {
2913     return tcg_constant_tl(1);
2914 }
2915 
2916 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2917 
2918 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2919 {
2920     gen_helper_rdpsr(dst, tcg_env);
2921     return dst;
2922 }
2923 
2924 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2925 
2926 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2927 {
2928     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2929     return dst;
2930 }
2931 
2932 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2933 
2934 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2935 {
2936     TCGv_i32 tl = tcg_temp_new_i32();
2937     TCGv_ptr tp = tcg_temp_new_ptr();
2938 
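    /* Index the htstate array by the current trap level, 8 bytes each. */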
2939     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2940     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2941     tcg_gen_shli_i32(tl, tl, 3);
2942     tcg_gen_ext_i32_ptr(tp, tl);
2943     tcg_gen_add_ptr(tp, tp, tcg_env);
2944 
2945     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2946     return dst;
2947 }
2948 
2949 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2950 
2951 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2952 {
2953     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2954     return dst;
2955 }
2956 
2957 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2958 
2959 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2960 {
2961     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2962     return dst;
2963 }
2964 
2965 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2966 
2967 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2968 {
2969     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2970     return dst;
2971 }
2972 
2973 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2974 
2975 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2976 {
2977     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2978     return dst;
2979 }
2980 
2981 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2982       do_rdhstick_cmpr)
2983 
2984 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2985 {
2986     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2987     return dst;
2988 }
2989 
2990 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2991 
2992 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2993 {
2994 #ifdef TARGET_SPARC64
2995     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2996 
2997     gen_load_trap_state_at_tl(r_tsptr);
2998     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2999     return dst;
3000 #else
3001     qemu_build_not_reached();
3002 #endif
3003 }
3004 
3005 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3006 
3007 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3008 {
3009 #ifdef TARGET_SPARC64
3010     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3011 
3012     gen_load_trap_state_at_tl(r_tsptr);
3013     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3014     return dst;
3015 #else
3016     qemu_build_not_reached();
3017 #endif
3018 }
3019 
3020 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3021 
3022 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3023 {
3024 #ifdef TARGET_SPARC64
3025     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3026 
3027     gen_load_trap_state_at_tl(r_tsptr);
3028     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3029     return dst;
3030 #else
3031     qemu_build_not_reached();
3032 #endif
3033 }
3034 
3035 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3036 
3037 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3038 {
3039 #ifdef TARGET_SPARC64
3040     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3041 
3042     gen_load_trap_state_at_tl(r_tsptr);
3043     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3044     return dst;
3045 #else
3046     qemu_build_not_reached();
3047 #endif
3048 }
3049 
3050 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3051 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3052 
3053 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3054 {
3055     return cpu_tbr;
3056 }
3057 
3058 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3059 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3060 
3061 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3062 {
3063     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3064     return dst;
3065 }
3066 
3067 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3068 
3069 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3070 {
3071     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3072     return dst;
3073 }
3074 
3075 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3076 
3077 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3078 {
3079     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3080     return dst;
3081 }
3082 
3083 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3084 
3085 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3086 {
3087     gen_helper_rdcwp(dst, tcg_env);
3088     return dst;
3089 }
3090 
3091 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3092 
3093 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3094 {
3095     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3096     return dst;
3097 }
3098 
3099 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3100 
3101 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3102 {
3103     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3104     return dst;
3105 }
3106 
3107 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3108       do_rdcanrestore)
3109 
3110 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3111 {
3112     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3113     return dst;
3114 }
3115 
3116 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3117 
3118 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3119 {
3120     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3121     return dst;
3122 }
3123 
3124 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3125 
3126 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3127 {
3128     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3129     return dst;
3130 }
3131 
3132 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3133 
3134 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3135 {
3136     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3137     return dst;
3138 }
3139 
3140 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3141 
3142 /* UA2005 strand status */
3143 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3144 {
3145     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3146     return dst;
3147 }
3148 
3149 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3150 
3151 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3152 {
3153     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3154     return dst;
3155 }
3156 
3157 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3158 
3159 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3160 {
3161     if (avail_64(dc)) {
3162         gen_helper_flushw(tcg_env);
3163         return advance_pc(dc);
3164     }
3165     return false;
3166 }
3167 
3168 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3169                           void (*func)(DisasContext *, TCGv))
3170 {
3171     TCGv src;
3172 
3173     /* For simplicity, we under-decoded the rs2 form. */
3174     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3175         return false;
3176     }
3177     if (!priv) {
3178         return raise_priv(dc);
3179     }
3180 
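    /* Per the architecture, the value written is rs1 XOR (rs2 or simm). */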
3181     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3182         src = tcg_constant_tl(a->rs2_or_imm);
3183     } else {
3184         TCGv src1 = gen_load_gpr(dc, a->rs1);
3185         if (a->rs2_or_imm == 0) {
3186             src = src1;
3187         } else {
3188             src = tcg_temp_new();
3189             if (a->imm) {
3190                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3191             } else {
3192                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3193             }
3194         }
3195     }
3196     func(dc, src);
3197     return advance_pc(dc);
3198 }
3199 
3200 static void do_wry(DisasContext *dc, TCGv src)
3201 {
3202     tcg_gen_ext32u_tl(cpu_y, src);
3203 }
3204 
3205 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3206 
3207 static void do_wrccr(DisasContext *dc, TCGv src)
3208 {
3209     gen_helper_wrccr(tcg_env, src);
3210 }
3211 
3212 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3213 
3214 static void do_wrasi(DisasContext *dc, TCGv src)
3215 {
3216     TCGv tmp = tcg_temp_new();
3217 
3218     tcg_gen_ext8u_tl(tmp, src);
3219     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3220     /* End TB to notice changed ASI. */
3221     dc->base.is_jmp = DISAS_EXIT;
3222 }
3223 
3224 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3225 
3226 static void do_wrfprs(DisasContext *dc, TCGv src)
3227 {
3228 #ifdef TARGET_SPARC64
3229     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3230     dc->fprs_dirty = 0;
3231     dc->base.is_jmp = DISAS_EXIT;
3232 #else
3233     qemu_build_not_reached();
3234 #endif
3235 }
3236 
3237 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3238 
3239 static void do_wrgsr(DisasContext *dc, TCGv src)
3240 {
3241     gen_trap_ifnofpu(dc);
3242     tcg_gen_mov_tl(cpu_gsr, src);
3243 }
3244 
3245 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3246 
3247 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3248 {
3249     gen_helper_set_softint(tcg_env, src);
3250 }
3251 
3252 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3253 
3254 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3255 {
3256     gen_helper_clear_softint(tcg_env, src);
3257 }
3258 
3259 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3260 
3261 static void do_wrsoftint(DisasContext *dc, TCGv src)
3262 {
3263     gen_helper_write_softint(tcg_env, src);
3264 }
3265 
3266 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3267 
3268 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3269 {
3270     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3271 
3272     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3273     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3274     translator_io_start(&dc->base);
3275     gen_helper_tick_set_limit(r_tickptr, src);
3276     /* End TB to handle timer interrupt */
3277     dc->base.is_jmp = DISAS_EXIT;
3278 }
3279 
3280 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3281 
3282 static void do_wrstick(DisasContext *dc, TCGv src)
3283 {
3284 #ifdef TARGET_SPARC64
3285     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3286 
3287     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3288     translator_io_start(&dc->base);
3289     gen_helper_tick_set_count(r_tickptr, src);
3290     /* End TB to handle timer interrupt */
3291     dc->base.is_jmp = DISAS_EXIT;
3292 #else
3293     qemu_build_not_reached();
3294 #endif
3295 }
3296 
3297 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3298 
3299 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3300 {
3301     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3302 
3303     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3304     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3305     translator_io_start(&dc->base);
3306     gen_helper_tick_set_limit(r_tickptr, src);
3307     /* End TB to handle timer interrupt */
3308     dc->base.is_jmp = DISAS_EXIT;
3309 }
3310 
3311 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3312 
3313 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3314 {
3315     finishing_insn(dc);
3316     save_state(dc);
3317     gen_helper_power_down(tcg_env);
3318 }
3319 
3320 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3321 
3322 static void do_wrpsr(DisasContext *dc, TCGv src)
3323 {
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

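/*
 * Store src as the hyper trap state for the current trap level:
 * the masked TL is scaled by 8, the element size, and added to
 * tcg_env to index the htstate array.
 */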
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)

static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)

static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)

static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

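    /*
     * For logical ops the flags come straight from the result already
     * written to cpu_cc_N: Z mirrors the result value, and C and V are
     * always clear.
     */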
    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)

static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

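    /*
     * The dividend is the 64-bit quantity (Y:rs1); a quotient that does
     * not fit in 32 bits saturates to UINT32_MAX, which is what the
     * umin below implements.
     */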
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

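    /*
     * L and R are the element indexes of the two addresses within an
     * aligned 8-byte block (8, 4 or 2 elements depending on width), and
     * M is the mask covering all elements.  R is complemented so that
     * the right-edge mask can likewise be formed by shifting M.
     */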
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l : l & r) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)

static bool do_rr(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src = gen_load_gpr(dc, a->rs);

    func(dst, src);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)

static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)

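/*
 * ALIGNADDRESS: dst = (s1 + s2) aligned down to 8 bytes, with the
 * discarded low 3 bits of the sum recorded in GSR.align for use by
 * a later FALIGNDATA.  The "little" variant records the negated
 * offset instead.
 */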
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

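/*
 * BMASK: dst = s1 + s2, with the 32-bit sum also deposited into the
 * upper half of GSR, from which BSHUFFLE takes its byte-selector mask.
 */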
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)

static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
{
    func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
    return true;
}

TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)

static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)

static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
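        /*
         * A 32-bit shift on a 64-bit cpu: fold the shift and the
         * zero- or sign-extension into one deposit/extract operation.
         */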
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)

static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}

static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}

static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}

static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)

/*
 * Major opcode 11 -- load and store instructions
 */

static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
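    /*
     * With address masking in effect (e.g. sparc64 PSTATE.AM), the
     * effective address is zero-extended from 32 bits.
     */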
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}

static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)

static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)

static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

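    /* Exchange rd with the 32-bit word at the effective address. */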
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

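    /*
     * Compare-and-swap: memory at [rs1] is compared against rs2; on a
     * match the value from rd is stored, and in either case rd receives
     * the old memory contents.
     */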
    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)

static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)

static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}

static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}

static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

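    /*
     * Split the 64-bit FSR image: fcc0 sits in the low word, fcc1-3
     * in the high word; the remaining fields go to the helper.
     */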
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)

static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)

static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)

static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)

static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)

static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)

static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)

static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)

static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

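    /* A single 64-bit register: operation size and maximum size are 8. */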
5015     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5016          gen_offset_fpr_D(a->rs2), 8, 8);
5017     return advance_pc(dc);
5018 }
5019 
5020 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
5021 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
5022 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
5023 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5024 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5025 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5026 
5027 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5028 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5029 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5030 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5031 
5032 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5033 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5034 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5035 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5036 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5037 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5038 
5039 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5040                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5041 {
5042     TCGv_i64 dst, src1, src2;
5043 
5044     if (gen_trap_ifnofpu(dc)) {
5045         return true;
5046     }
5047 
5048     dst = tcg_temp_new_i64();
5049     src1 = gen_load_fpr_D(dc, a->rs1);
5050     src2 = gen_load_fpr_D(dc, a->rs2);
5051     func(dst, src1, src2);
5052     gen_store_fpr_D(dc, a->rd, dst);
5053     return advance_pc(dc);
5054 }
5055 
5056 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5057 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5058 
5059 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5060 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5061 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5062 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5063 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5064 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5065 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5066 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5067 
5068 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5069 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
5070 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5071 
5072 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5073 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5074 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5075 
5076 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5077 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5078 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5079 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5080 
5081 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5082                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5083 {
5084     TCGv_i64 src1, src2;
5085     TCGv dst;
5086 
5087     if (gen_trap_ifnofpu(dc)) {
5088         return true;
5089     }
5090 
5091     dst = gen_dest_gpr(dc, a->rd);
5092     src1 = gen_load_fpr_D(dc, a->rs1);
5093     src2 = gen_load_fpr_D(dc, a->rs2);
5094     func(dst, src1, src2);
5095     gen_store_gpr(dc, a->rd, dst);
5096     return advance_pc(dc);
5097 }
5098 
5099 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5100 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5101 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5102 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5103 
5104 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5105 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5106 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5107 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5108 
5109 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5110 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5111 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5112 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5113 
5114 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5115 TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
5116 TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5117 
5118 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5119                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5120 {
5121     TCGv_i64 dst, src1, src2;
5122 
5123     if (gen_trap_ifnofpu(dc)) {
5124         return true;
5125     }
5126 
5127     dst = tcg_temp_new_i64();
5128     src1 = gen_load_fpr_D(dc, a->rs1);
5129     src2 = gen_load_fpr_D(dc, a->rs2);
5130     func(dst, tcg_env, src1, src2);
5131     gen_store_fpr_D(dc, a->rd, dst);
5132     return advance_pc(dc);
5133 }
5134 
5135 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5136 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5137 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5138 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5139 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5140 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5141 
5142 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5143 {
5144     TCGv_i64 dst;
5145     TCGv_i32 src1, src2;
5146 
5147     if (gen_trap_ifnofpu(dc)) {
5148         return true;
5149     }
5150     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5151         return raise_unimpfpop(dc);
5152     }
5153 
5154     dst = tcg_temp_new_i64();
5155     src1 = gen_load_fpr_F(dc, a->rs1);
5156     src2 = gen_load_fpr_F(dc, a->rs2);
5157     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5158     gen_store_fpr_D(dc, a->rd, dst);
5159     return advance_pc(dc);
5160 }
5161 
5162 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5163 {
5164     TCGv_i64 dst;
5165     TCGv_i32 src1, src2;
5166 
5167     if (!avail_VIS3(dc)) {
5168         return false;
5169     }
5170     if (gen_trap_ifnofpu(dc)) {
5171         return true;
5172     }
5173     dst = tcg_temp_new_i64();
5174     src1 = gen_load_fpr_F(dc, a->rs1);
5175     src2 = gen_load_fpr_F(dc, a->rs2);
5176     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5177     gen_store_fpr_D(dc, a->rd, dst);
5178     return advance_pc(dc);
5179 }
5180 
5181 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5182                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5183 {
5184     TCGv_i32 dst, src1, src2, src3;
5185 
5186     if (gen_trap_ifnofpu(dc)) {
5187         return true;
5188     }
5189 
5190     src1 = gen_load_fpr_F(dc, a->rs1);
5191     src2 = gen_load_fpr_F(dc, a->rs2);
5192     src3 = gen_load_fpr_F(dc, a->rs3);
5193     dst = tcg_temp_new_i32();
5194     func(dst, src1, src2, src3);
5195     gen_store_fpr_F(dc, a->rd, dst);
5196     return advance_pc(dc);
5197 }
5198 
5199 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5200 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5201 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5202 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5203 
5204 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5205                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5206 {
5207     TCGv_i64 dst, src1, src2, src3;
5208 
5209     if (gen_trap_ifnofpu(dc)) {
5210         return true;
5211     }
5212 
5213     dst  = tcg_temp_new_i64();
5214     src1 = gen_load_fpr_D(dc, a->rs1);
5215     src2 = gen_load_fpr_D(dc, a->rs2);
5216     src3 = gen_load_fpr_D(dc, a->rs3);
5217     func(dst, src1, src2, src3);
5218     gen_store_fpr_D(dc, a->rd, dst);
5219     return advance_pc(dc);
5220 }
5221 
5222 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5223 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5224 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5225 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5226 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5227 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5228 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5229 
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

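/* FdMULq widens: it multiplies two doubles into a quad result. */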
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

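/*
 * FMOVR: conditionally move an FP register based on the contents of an
 * integer register.  gen_compare_reg() rejects the reserved cond
 * encodings by returning false, which decodes as an illegal instruction.
 */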
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

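/* FMOVcc: conditional FP move on the integer condition codes. */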
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

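/* FMOVfcc: conditional FP move on the floating-point condition codes. */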
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

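/*
 * FCMP vs FCMPE: the 'E' (signaling) variants also raise an
 * invalid-operation exception on unordered operands, quiet NaNs
 * included.  Pre-v9 (32-bit) CPUs have only %fcc0, so a nonzero cc
 * field is rejected there as an illegal instruction.
 */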
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

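/*
 * VIS3 lexicographic compares.  Note these helpers take no tcg_env, so
 * they cannot touch the IEEE exception state; they write the selected
 * condition-code register directly.
 */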
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

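/*
 * Direct GPR <-> FPR moves (VIS3B).  Rather than going through the FPR
 * load/store wrappers, these apply a tcg_gen_ld/st accessor directly to
 * the float register's byte offset within CPUSPARCState.
 */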
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)

static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)

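/*
 * Translator framework hooks: everything below implements the
 * TranslatorOps callbacks driven by the generic translator_loop().
 */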
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
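    /*
     * A worked example (assuming 4 KiB pages, i.e. TARGET_PAGE_MASK ==
     * (target_long)-4096): with pc_first == 0x1ff8,
     * pc_first | TARGET_PAGE_MASK == -8, so bound == 8 / 4 == 2 more
     * 4-byte instructions fit before the page boundary.
     */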
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

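/*
 * npc values with either of the low two bits set are the JUMP_PC /
 * DYNAMIC_PC sentinels rather than real addresses; canonicalize them
 * before recording the insn-start data consumed by
 * sparc_restore_state_to_opc() below.
 */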
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

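/*
 * TB epilogue.  Static pc/npc pairs can chain directly to the next TB;
 * dynamic values fall back to a TB lookup or a plain exit.  Any delayed
 * conditional exceptions queued during translation are emitted at the
 * end, each behind its own label.
 */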
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

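/*
 * Register the TCG globals backing CPUSPARCState fields.  cpu_regs[0]
 * stays NULL: %g0 is read-as-zero and has no backing storage.  The
 * windowed registers %o0-%i7 are addressed relative to regwptr, so a
 * register-window change only has to update that one pointer.
 */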
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}

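/*
 * Called on exception unwind: rebuild pc/npc from the values recorded
 * by sparc_tr_insn_start().  For the JUMP_PC sentinel the branch was
 * still unresolved at translation time, so the target is picked here
 * from the already-computed 'cond' flag.
 */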
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}