xref: /openbmc/qemu/target/sparc/translate.c (revision b2b48493362b9f77ca66fffb1464f7fc5a32c6e9)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
93 # define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
94 # define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
95 # define MAXTL_MASK                             0
96 #endif
97 
98 /* Dynamic PC, must exit to main loop. */
99 #define DYNAMIC_PC         1
100 /* Dynamic PC, one of two values according to jump_pc[T2]. */
101 #define JUMP_PC            2
102 /* Dynamic PC, may lookup next TB. */
103 #define DYNAMIC_PC_LOOKUP  3
104 
105 #define DISAS_EXIT  DISAS_TARGET_0
106 
107 /* global register indexes */
108 static TCGv_ptr cpu_regwptr;
109 static TCGv cpu_pc, cpu_npc;
110 static TCGv cpu_regs[32];
111 static TCGv cpu_y;
112 static TCGv cpu_tbr;
113 static TCGv cpu_cond;
114 static TCGv cpu_cc_N;
115 static TCGv cpu_cc_V;
116 static TCGv cpu_icc_Z;
117 static TCGv cpu_icc_C;
118 #ifdef TARGET_SPARC64
119 static TCGv cpu_xcc_Z;
120 static TCGv cpu_xcc_C;
121 static TCGv_i32 cpu_fprs;
122 static TCGv cpu_gsr;
123 #else
124 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
125 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
126 #endif
127 
128 #ifdef TARGET_SPARC64
129 #define cpu_cc_Z  cpu_xcc_Z
130 #define cpu_cc_C  cpu_xcc_C
131 #else
132 #define cpu_cc_Z  cpu_icc_Z
133 #define cpu_cc_C  cpu_icc_C
134 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
135 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
136 #endif
137 
138 /* Floating point comparison registers */
139 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
140 
141 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
142 #ifdef TARGET_SPARC64
143 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
144 # define env64_field_offsetof(X)  env_field_offsetof(X)
145 #else
146 # define env32_field_offsetof(X)  env_field_offsetof(X)
147 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
148 #endif
149 
/* A deferred comparison against an immediate, used for JUMP_PC branches. */
typedef struct DisasCompare {
    TCGCond cond;   /* condition to evaluate */
    TCGv c1;        /* left-hand operand */
    int c2;         /* immediate right-hand operand */
} DisasCompare;
155 
/*
 * A pending exception, linked into DisasContext.delay_excp_list.
 * NOTE(review): lab/excp appear to be the branch target and trap number
 * for out-of-line raise code — confirm against the emitter later in file.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
164 
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;               /* presumably MMU index for data accesses — confirm at use sites */
    bool cpu_cond_live;        /* cpu_cond holds a live value (see finishing_insn) */
    bool fpu_enabled;
    bool address_mask_32bit;   /* consulted by AM_CHECK for 32-bit address masking */
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;            /* FPRS dirty bits already set within this TB */
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;
192 
193 // This function uses non-native bit order
194 #define GET_FIELD(X, FROM, TO)                                  \
195     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
196 
197 // This function uses the order in the manuals, i.e. bit 0 is 2^0
198 #define GET_FIELD_SP(X, FROM, TO)               \
199     GET_FIELD(X, 31 - (TO), 31 - (FROM))
200 
201 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
202 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
203 
204 #define UA2005_HTRAP_MASK 0xff
205 #define V8_TRAP_MASK 0x7f
206 
207 #define IS_IMM (insn & (1<<13))
208 
/*
 * Mark half of the FP register file dirty in FPRS: bit 1 for the
 * lower registers (rd < 32), bit 2 for the upper.  No-op on 32-bit
 * sparc, which has no FPRS register.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
221 
222 /* floating point registers moves */
223 
224 static int gen_offset_fpr_F(unsigned int reg)
225 {
226     int ret;
227 
228     tcg_debug_assert(reg < 32);
229     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
230     if (reg & 1) {
231         ret += offsetof(CPU_DoubleU, l.lower);
232     } else {
233         ret += offsetof(CPU_DoubleU, l.upper);
234     }
235     return ret;
236 }
237 
238 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
239 {
240     TCGv_i32 ret = tcg_temp_new_i32();
241     tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
242     return ret;
243 }
244 
245 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
246 {
247     tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
248     gen_update_fprs_dirty(dc, dst);
249 }
250 
251 static int gen_offset_fpr_D(unsigned int reg)
252 {
253     tcg_debug_assert(reg < 64);
254     tcg_debug_assert(reg % 2 == 0);
255     return offsetof(CPUSPARCState, fpr[reg / 2]);
256 }
257 
258 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
259 {
260     TCGv_i64 ret = tcg_temp_new_i64();
261     tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
262     return ret;
263 }
264 
265 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
266 {
267     tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
268     gen_update_fprs_dirty(dc, dst);
269 }
270 
271 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
272 {
273     TCGv_i128 ret = tcg_temp_new_i128();
274     TCGv_i64 h = gen_load_fpr_D(dc, src);
275     TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
276 
277     tcg_gen_concat_i64_i128(ret, l, h);
278     return ret;
279 }
280 
281 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
282 {
283     TCGv_i64 h = tcg_temp_new_i64();
284     TCGv_i64 l = tcg_temp_new_i64();
285 
286     tcg_gen_extr_i128_i64(l, h, v);
287     gen_store_fpr_D(dc, dst, h);
288     gen_store_fpr_D(dc, dst + 2, l);
289 }
290 
291 /* moves */
292 #ifdef CONFIG_USER_ONLY
293 #define supervisor(dc) 0
294 #define hypervisor(dc) 0
295 #else
296 #ifdef TARGET_SPARC64
297 #define hypervisor(dc) (dc->hypervisor)
298 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
299 #else
300 #define supervisor(dc) (dc->supervisor)
301 #define hypervisor(dc) 0
302 #endif
303 #endif
304 
305 #if !defined(TARGET_SPARC64)
306 # define AM_CHECK(dc)  false
307 #elif defined(TARGET_ABI32)
308 # define AM_CHECK(dc)  true
309 #elif defined(CONFIG_USER_ONLY)
310 # define AM_CHECK(dc)  false
311 #else
312 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
313 #endif
314 
315 static void gen_address_mask(DisasContext *dc, TCGv addr)
316 {
317     if (AM_CHECK(dc)) {
318         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
319     }
320 }
321 
322 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
323 {
324     return AM_CHECK(dc) ? (uint32_t)addr : addr;
325 }
326 
327 static TCGv gen_load_gpr(DisasContext *dc, int reg)
328 {
329     if (reg > 0) {
330         assert(reg < 32);
331         return cpu_regs[reg];
332     } else {
333         TCGv t = tcg_temp_new();
334         tcg_gen_movi_tl(t, 0);
335         return t;
336     }
337 }
338 
339 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
340 {
341     if (reg > 0) {
342         assert(reg < 32);
343         tcg_gen_mov_tl(cpu_regs[reg], v);
344     }
345 }
346 
347 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
348 {
349     if (reg > 0) {
350         assert(reg < 32);
351         return cpu_regs[reg];
352     } else {
353         return tcg_temp_new();
354     }
355 }
356 
357 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
358 {
359     return translator_use_goto_tb(&s->base, pc) &&
360            translator_use_goto_tb(&s->base, npc);
361 }
362 
363 static void gen_goto_tb(DisasContext *s, int tb_num,
364                         target_ulong pc, target_ulong npc)
365 {
366     if (use_goto_tb(s, pc, npc))  {
367         /* jump to same page: we can use a direct jump */
368         tcg_gen_goto_tb(tb_num);
369         tcg_gen_movi_tl(cpu_pc, pc);
370         tcg_gen_movi_tl(cpu_npc, npc);
371         tcg_gen_exit_tb(s->base.tb, tb_num);
372     } else {
373         /* jump to another page: we can use an indirect jump */
374         tcg_gen_movi_tl(cpu_pc, pc);
375         tcg_gen_movi_tl(cpu_npc, npc);
376         tcg_gen_lookup_and_goto_ptr();
377     }
378 }
379 
/*
 * Return the 32-bit carry (icc.C) as a value in bit 0.  On sparc64
 * the carry into bit 32 is cached in bit 32 of cpu_icc_C and must be
 * extracted; on sparc32 cpu_icc_C (== cpu_cc_C) already holds it.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
389 
/*
 * dst = src1 + src2 (+ cin if non-NULL), updating the flag registers:
 * N and Z take the result, C the carry-out, V the signed overflow
 * ((N ^ src2) & ~(src1 ^ src2)).  On sparc64 the 32-bit icc variants
 * are derived as well.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* cpu_cc_Z temporarily holds src1 ^ src2 for the V computation. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
414 
/* ADDcc: add and set condition codes, no carry-in. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
419 
/* TADDcc: tagged add; non-zero tag bits (low 2 bits) force icc.V set. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
435 
436 static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
437 {
438     tcg_gen_add_tl(dst, src1, src2);
439     tcg_gen_add_tl(dst, dst, gen_carry32());
440 }
441 
442 static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
443 {
444     gen_op_addcc_int(dst, src1, src2, gen_carry32());
445 }
446 
447 static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
448 {
449     tcg_gen_add_tl(dst, src1, src2);
450     tcg_gen_add_tl(dst, dst, cpu_cc_C);
451 }
452 
/* ADDXCcc: add with the 64-bit carry-in (cpu_cc_C), setting flags. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
457 
458 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
459 {
460     TCGv z = tcg_constant_tl(0);
461 
462     if (cin) {
463         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
464         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
465     } else {
466         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
467     }
468     tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
469     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
470     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
471     tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
472 #ifdef TARGET_SPARC64
473     tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
474     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
475 #endif
476     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
477     tcg_gen_mov_tl(dst, cpu_cc_N);
478 }
479 
/* SUBcc: subtract and set condition codes, no borrow-in. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
484 
/* TSUBcc: tagged subtract; non-zero tag bits force icc.V set. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
500 
501 static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
502 {
503     tcg_gen_sub_tl(dst, src1, src2);
504     tcg_gen_sub_tl(dst, dst, gen_carry32());
505 }
506 
507 static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
508 {
509     gen_op_subcc_int(dst, src1, src2, gen_carry32());
510 }
511 
/*
 * MULScc: one step of the 32-bit shift-and-add multiply.
 * Conditions src2 on Y bit 0, shifts (N ^ V) into bit 31 of a
 * right-shifted src1, shifts src1 bit 0 into Y, and sets the
 * condition codes from the partial sum via gen_op_addcc.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
548 
/*
 * UMUL/SMUL: 32x32 -> 64-bit multiply.  On 32-bit targets dst gets
 * the low 32 bits and %y the high 32; on 64-bit targets dst holds
 * the full 64-bit product and %y a copy of its high 32 bits.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    /* Truncate the 64-bit inputs to their low 32 bits first. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
573 
/* UMUL: unsigned 32x32 multiply; see gen_op_multiply for dst/%y layout. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
579 
/* SMUL: signed 32x32 multiply; see gen_op_multiply for dst/%y layout. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
585 
586 static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
587 {
588     TCGv discard = tcg_temp_new();
589     tcg_gen_mulu2_tl(discard, dst, src1, src2);
590 }
591 
592 static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
593                            TCGv_i64 src2, TCGv_i64 src3)
594 {
595     TCGv_i64 t = tcg_temp_new_i64();
596 
597     tcg_gen_mul_i64(t, src1, src2);
598     tcg_gen_add_i64(dst, src3, t);
599 }
600 
/* FPMADDXHI: dst = high 64 bits of the unsigned src1 * src2 + src3. */
static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    /* Add src3 into the 128-bit product; the carry ripples into dst. */
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}
611 
/*
 * SDIV: 32-bit signed divide via helper; the 32-bit result is
 * sign-extended (sparc64) or truncated out of the 64-bit helper
 * return (sparc32) into dst.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
623 
/*
 * UDIVcc: unsigned divide, setting condition codes.  Per the unpacking
 * below, the helper returns the quotient in the low 32 bits and the V
 * flag in the upper half.  N/Z follow the result; C is always zero.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* Reuse cpu_cc_V as the 64-bit scratch; it is overwritten below. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
648 
/*
 * SDIVcc: signed divide, setting condition codes.  Same result/flag
 * packing as gen_op_udivcc, but the 32-bit quotient is sign-extended
 * into N on sparc64.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* Reuse cpu_cc_V as the 64-bit scratch; it is overwritten below. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
673 
/* TADDccTV: tagged add; all work, including any trap, is in the helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
678 
/* TSUBccTV: tagged subtract; all work, including any trap, is in the helper. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
683 
/*
 * POPC: population count of src2.  src1 is intentionally ignored; the
 * parameter exists only to match the common 3-operand emitter signature.
 */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
688 
/* Count leading zeros; an all-zero input yields TARGET_LONG_BITS. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
693 
694 #ifndef TARGET_SPARC64
/* Stub for 32-bit builds, where the VIS array8 helper is unreachable. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
699 #endif
700 
/* ARRAY16: same addressing as array8, with the index scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
706 
/* ARRAY32: same addressing as array8, with the index scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
712 
/* VIS FPACK16 via helper; uses GSR for the scale factor.  64-bit only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
721 
/* VIS FPACKFIX via helper; uses GSR for the scale factor.  64-bit only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
730 
/* VIS FPACK32 via helper; uses GSR for the scale factor.  64-bit only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
739 
/*
 * FPADDS16S: two lanes of 16-bit signed saturating add.  Each lane is
 * sign-extended to 32 bits, added, clamped to [INT16_MIN, INT16_MAX],
 * then the lanes are re-packed.
 */
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
757 
/*
 * FPSUBS16S: two lanes of 16-bit signed saturating subtract;
 * same structure as gen_op_fpadds16s.
 */
static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
775 
/*
 * FPADDS32S: 32-bit signed saturating add.
 * Overflow test: V = (r ^ src2) & ~(src1 ^ src2) is negative on
 * signed overflow.  On overflow the wrapped result's sign picks the
 * saturation value: non-negative wrap -> INT32_MIN, negative wrap
 * -> INT32_MAX (computed as setcond(r >= 0) + INT32_MAX).
 */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
793 
/*
 * FPSUBS32S: 32-bit signed saturating subtract.
 * Overflow test: V = (r ^ src1) & (src1 ^ src2) is negative on
 * signed overflow; saturation value selected as in gen_op_fpadds32s.
 */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
811 
/*
 * FALIGNDATA: view s1:s2 as a 16-byte value and extract the 8 bytes
 * starting at the byte offset held in the low 3 bits of gsr
 * (GSR.align).  64-bit only.
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Convert the byte offset to a bit count. */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
839 
/* FALIGNDATA using the architectural GSR as the alignment source. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}
844 
/* VIS BSHUFFLE via helper; byte selection is controlled by GSR.  64-bit only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
853 
/* PDISTN: pixel distance with the accumulator forced to zero.  64-bit only. */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}
862 
/*
 * FMUL8x16AL: fmul8x16a against the low 16-bit half of src2.
 * Note src2 (a caller-owned temp) is sign-extended in place.
 */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}
868 
/*
 * FMUL8x16AU: fmul8x16a against the high 16-bit half of src2.
 * Note src2 (a caller-owned temp) is shifted down in place.
 */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
874 
/*
 * FMULD8ULx16: multiply the unsigned low byte of each 16-bit half of
 * src1 by the corresponding signed 16-bit half of src2, producing
 * two 32-bit products packed into dst.
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* Low lane: u8(src1) * s16(src2). */
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* High lane: u8(src1 >> 16) * s16(src2 >> 16). */
    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
891 
/*
 * FMULD8SUx16: multiply the signed high byte of each 16-bit half of
 * src1 by the corresponding signed 16-bit half of src2, producing
 * two 32-bit products (each effectively scaled by 256) packed into dst.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
916 
917 #ifdef TARGET_SPARC64
/*
 * FCHKSM16, vector path: per-lane 16-bit add with end-around carry
 * (ones'-complement checksum accumulation).
 */
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    /* Unsigned a < src1 detects per-lane carry-out. */
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}
929 
/*
 * gvec expander for FCHKSM16: integer fallback goes through the
 * out-of-line helper, the vector path through gen_vec_fchksm16.
 */
static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
944 
/*
 * FMEAN16, vector path: per-lane (src1 + src2 + 1) >> 1 computed
 * without intermediate overflow — shift each operand first, then
 * add back the rounding bit ((src1 | src2) & 1).
 * Note src1/src2 are clobbered.
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
957 
/*
 * gvec expander for FMEAN16: integer fallback goes through the
 * out-of-line helper, the vector path through gen_vec_fmean16.
 */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
972 #else
973 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
974 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
975 #endif
976 
977 static void finishing_insn(DisasContext *dc)
978 {
979     /*
980      * From here, there is no future path through an unwinding exception.
981      * If the current insn cannot raise an exception, the computation of
982      * cpu_cond may be able to be elided.
983      */
984     if (dc->cpu_cond_live) {
985         tcg_gen_discard_tl(cpu_cond);
986         dc->cpu_cond_live = false;
987     }
988 }
989 
/*
 * Resolve a pending conditional branch: select between the two possible
 * next-pc values recorded in jump_pc[] according to the saved condition.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    /* npc = (c1 cond c2) ? jump_pc[0] : jump_pc[1] */
    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
998 
999 /* call this function before using the condition register as it may
1000    have been set for a jump */
1001 static void flush_cond(DisasContext *dc)
1002 {
1003     if (dc->npc == JUMP_PC) {
1004         gen_generic_branch(dc);
1005         dc->npc = DYNAMIC_PC_LOOKUP;
1006     }
1007 }
1008 
/* Flush the translation-time npc into the cpu_npc global, if needed. */
static void save_npc(DisasContext *dc)
{
    /* Low bits set mean npc holds a symbolic JUMP/DYNAMIC marker. */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the run-time value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1027 
/* Flush pc and npc to the CPU state, e.g. before calling a helper. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1033 
/* Raise exception WHICH at the current pc/npc and end the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1041 
1042 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1043 {
1044     DisasDelayException *e = g_new0(DisasDelayException, 1);
1045 
1046     e->next = dc->delay_excp_list;
1047     dc->delay_excp_list = e;
1048 
1049     e->lab = gen_new_label();
1050     e->excp = excp;
1051     e->pc = dc->pc;
1052     /* Caller must have used flush_cond before branch. */
1053     assert(e->npc != JUMP_PC);
1054     e->npc = dc->npc;
1055 
1056     return e->lab;
1057 }
1058 
/* As delay_exceptionv, for a constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1063 
1064 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1065 {
1066     TCGv t = tcg_temp_new();
1067     TCGLabel *lab;
1068 
1069     tcg_gen_andi_tl(t, addr, mask);
1070 
1071     flush_cond(dc);
1072     lab = delay_exception(dc, TT_UNALIGNED);
1073     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1074 }
1075 
/* Advance pc to npc, resolving any pending conditional branch first. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    /* Low bits set mean npc holds a symbolic JUMP/DYNAMIC marker. */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
1099 
/*
 * Fill *cmp with the integer condition COND evaluated on the icc
 * (xcc false) or xcc (xcc true) flags, expressed as a comparison of
 * cmp->c1 against the constant cmp->c2 (always 0 here).
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    /* Conditions 8..15 are the negations of 0..7; see tail of function. */
    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* For icc, the carry is kept in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1198 
/*
 * Fill *cmp with the floating-point condition COND evaluated on the
 * fcc field CC, as a comparison of cmp->c1 against constant cmp->c2.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both values with bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    /* Widen the 32-bit fcc comparison value to a target-long temp. */
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1258 
1259 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1260 {
1261     static const TCGCond cond_reg[4] = {
1262         TCG_COND_NEVER,  /* reserved */
1263         TCG_COND_EQ,
1264         TCG_COND_LE,
1265         TCG_COND_LT,
1266     };
1267     TCGCond tcond;
1268 
1269     if ((cond & 3) == 0) {
1270         return false;
1271     }
1272     tcond = cond_reg[cond & 3];
1273     if (cond & 4) {
1274         tcond = tcg_invert_cond(tcond);
1275     }
1276 
1277     cmp->cond = tcond;
1278     cmp->c1 = tcg_temp_new();
1279     cmp->c2 = 0;
1280     tcg_gen_mov_tl(cmp->c1, r_src);
1281     return true;
1282 }
1283 
/* Clear the FSR current-exception (cexc) and trap-type (ftt) fields. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1289 
/* FMOVS: single-precision move; clears cexc/ftt like any completed FPop. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1295 
/* FNEGS: toggle the sign bit (bit 31); clears cexc/ftt. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1301 
/* FABSS: clear the sign bit (bit 31); clears cexc/ftt. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1307 
/* FMOVD: double-precision move; clears cexc/ftt. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1313 
/* FNEGD: toggle the sign bit (bit 63); clears cexc/ftt. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1319 
/* FABSD: clear the sign bit (bit 63); clears cexc/ftt. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1325 
1326 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1327 {
1328     TCGv_i64 l = tcg_temp_new_i64();
1329     TCGv_i64 h = tcg_temp_new_i64();
1330 
1331     tcg_gen_extr_i128_i64(l, h, src);
1332     tcg_gen_xori_i64(h, h, 1ull << 63);
1333     tcg_gen_concat_i64_i128(dst, l, h);
1334 }
1335 
1336 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1337 {
1338     TCGv_i64 l = tcg_temp_new_i64();
1339     TCGv_i64 h = tcg_temp_new_i64();
1340 
1341     tcg_gen_extr_i128_i64(l, h, src);
1342     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1343     tcg_gen_concat_i64_i128(dst, l, h);
1344 }
1345 
/* d = s1 * s2 + s3, single precision, one rounding step. */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1350 
/* d = s1 * s2 + s3, double precision, one rounding step. */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1355 
1356 static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1357 {
1358     int op = float_muladd_negate_c;
1359     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1360 }
1361 
1362 static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1363 {
1364     int op = float_muladd_negate_c;
1365     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1366 }
1367 
/* d = -(s1 * s2 - s3), single precision, one rounding step. */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1373 
/* d = -(s1 * s2 - s3), double precision, one rounding step. */
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1379 
/* d = -(s1 * s2 + s3), single precision, one rounding step. */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1385 
/* d = -(s1 * s2 + s3), double precision, one rounding step. */
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1391 
/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1399 
/* As gen_op_fhadds, for double precision. */
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1406 
/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1414 
/* As gen_op_fhsubs, for double precision. */
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1421 
/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1429 
/* As gen_op_fnhadds, for double precision. */
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1436 
/* Raise an fp_exception with trap type FTT at the current insn. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1448 
/*
 * Raise an FPU-disabled trap if required; returns nonzero when the
 * trap was raised and the insn must not be translated further.
 * User-only emulation always has the FPU enabled.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1459 
/* asi moves */
/* Classification of an ASI access, decided once in resolve_asi(). */
typedef enum {
    GET_ASI_HELPER,   /* out-of-line helper performs the access */
    GET_ASI_EXCP,     /* an exception was already generated */
    GET_ASI_DIRECT,   /* plain qemu_ld/st with a chosen mmu_idx */
    GET_ASI_DTWINX,   /* 128-bit atomic twin load/store */
    GET_ASI_CODE,     /* instruction-space access (sparc32 only) */
    GET_ASI_BLOCK,    /* 64-byte block load/store */
    GET_ASI_SHORT,    /* 8/16-bit fp load/store */
    GET_ASI_BCOPY,    /* sparc32 block copy */
    GET_ASI_BFILL,    /* sparc32 block fill */
} ASIType;

/* Resolved ASI: classification plus the memop/mmu_idx to use. */
typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;
1479 
1480 /*
1481  * Build DisasASI.
1482  * For asi == -1, treat as non-asi.
1483  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1484  */
1485 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1486 {
1487     ASIType type = GET_ASI_HELPER;
1488     int mem_idx = dc->mem_idx;
1489 
1490     if (asi == -1) {
1491         /* Artificial "non-asi" case. */
1492         type = GET_ASI_DIRECT;
1493         goto done;
1494     }
1495 
1496 #ifndef TARGET_SPARC64
1497     /* Before v9, all asis are immediate and privileged.  */
1498     if (asi < 0) {
1499         gen_exception(dc, TT_ILL_INSN);
1500         type = GET_ASI_EXCP;
1501     } else if (supervisor(dc)
1502                /* Note that LEON accepts ASI_USERDATA in user mode, for
1503                   use with CASA.  Also note that previous versions of
1504                   QEMU allowed (and old versions of gcc emitted) ASI_P
1505                   for LEON, which is incorrect.  */
1506                || (asi == ASI_USERDATA
1507                    && (dc->def->features & CPU_FEATURE_CASA))) {
1508         switch (asi) {
1509         case ASI_USERDATA:    /* User data access */
1510             mem_idx = MMU_USER_IDX;
1511             type = GET_ASI_DIRECT;
1512             break;
1513         case ASI_KERNELDATA:  /* Supervisor data access */
1514             mem_idx = MMU_KERNEL_IDX;
1515             type = GET_ASI_DIRECT;
1516             break;
1517         case ASI_USERTXT:     /* User text access */
1518             mem_idx = MMU_USER_IDX;
1519             type = GET_ASI_CODE;
1520             break;
1521         case ASI_KERNELTXT:   /* Supervisor text access */
1522             mem_idx = MMU_KERNEL_IDX;
1523             type = GET_ASI_CODE;
1524             break;
1525         case ASI_M_BYPASS:    /* MMU passthrough */
1526         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1527             mem_idx = MMU_PHYS_IDX;
1528             type = GET_ASI_DIRECT;
1529             break;
1530         case ASI_M_BCOPY: /* Block copy, sta access */
1531             mem_idx = MMU_KERNEL_IDX;
1532             type = GET_ASI_BCOPY;
1533             break;
1534         case ASI_M_BFILL: /* Block fill, stda access */
1535             mem_idx = MMU_KERNEL_IDX;
1536             type = GET_ASI_BFILL;
1537             break;
1538         }
1539 
1540         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1541          * permissions check in get_physical_address(..).
1542          */
1543         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1544     } else {
1545         gen_exception(dc, TT_PRIV_INSN);
1546         type = GET_ASI_EXCP;
1547     }
1548 #else
1549     if (asi < 0) {
1550         asi = dc->asi;
1551     }
1552     /* With v9, all asis below 0x80 are privileged.  */
1553     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1554        down that bit into DisasContext.  For the moment that's ok,
1555        since the direct implementations below doesn't have any ASIs
1556        in the restricted [0x30, 0x7f] range, and the check will be
1557        done properly in the helper.  */
1558     if (!supervisor(dc) && asi < 0x80) {
1559         gen_exception(dc, TT_PRIV_ACT);
1560         type = GET_ASI_EXCP;
1561     } else {
1562         switch (asi) {
1563         case ASI_REAL:      /* Bypass */
1564         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1565         case ASI_REAL_L:    /* Bypass LE */
1566         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1567         case ASI_TWINX_REAL:   /* Real address, twinx */
1568         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1569         case ASI_QUAD_LDD_PHYS:
1570         case ASI_QUAD_LDD_PHYS_L:
1571             mem_idx = MMU_PHYS_IDX;
1572             break;
1573         case ASI_N:  /* Nucleus */
1574         case ASI_NL: /* Nucleus LE */
1575         case ASI_TWINX_N:
1576         case ASI_TWINX_NL:
1577         case ASI_NUCLEUS_QUAD_LDD:
1578         case ASI_NUCLEUS_QUAD_LDD_L:
1579             if (hypervisor(dc)) {
1580                 mem_idx = MMU_PHYS_IDX;
1581             } else {
1582                 mem_idx = MMU_NUCLEUS_IDX;
1583             }
1584             break;
1585         case ASI_AIUP:  /* As if user primary */
1586         case ASI_AIUPL: /* As if user primary LE */
1587         case ASI_TWINX_AIUP:
1588         case ASI_TWINX_AIUP_L:
1589         case ASI_BLK_AIUP_4V:
1590         case ASI_BLK_AIUP_L_4V:
1591         case ASI_BLK_AIUP:
1592         case ASI_BLK_AIUPL:
1593             mem_idx = MMU_USER_IDX;
1594             break;
1595         case ASI_AIUS:  /* As if user secondary */
1596         case ASI_AIUSL: /* As if user secondary LE */
1597         case ASI_TWINX_AIUS:
1598         case ASI_TWINX_AIUS_L:
1599         case ASI_BLK_AIUS_4V:
1600         case ASI_BLK_AIUS_L_4V:
1601         case ASI_BLK_AIUS:
1602         case ASI_BLK_AIUSL:
1603             mem_idx = MMU_USER_SECONDARY_IDX;
1604             break;
1605         case ASI_S:  /* Secondary */
1606         case ASI_SL: /* Secondary LE */
1607         case ASI_TWINX_S:
1608         case ASI_TWINX_SL:
1609         case ASI_BLK_COMMIT_S:
1610         case ASI_BLK_S:
1611         case ASI_BLK_SL:
1612         case ASI_FL8_S:
1613         case ASI_FL8_SL:
1614         case ASI_FL16_S:
1615         case ASI_FL16_SL:
1616             if (mem_idx == MMU_USER_IDX) {
1617                 mem_idx = MMU_USER_SECONDARY_IDX;
1618             } else if (mem_idx == MMU_KERNEL_IDX) {
1619                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1620             }
1621             break;
1622         case ASI_P:  /* Primary */
1623         case ASI_PL: /* Primary LE */
1624         case ASI_TWINX_P:
1625         case ASI_TWINX_PL:
1626         case ASI_BLK_COMMIT_P:
1627         case ASI_BLK_P:
1628         case ASI_BLK_PL:
1629         case ASI_FL8_P:
1630         case ASI_FL8_PL:
1631         case ASI_FL16_P:
1632         case ASI_FL16_PL:
1633             break;
1634         }
1635         switch (asi) {
1636         case ASI_REAL:
1637         case ASI_REAL_IO:
1638         case ASI_REAL_L:
1639         case ASI_REAL_IO_L:
1640         case ASI_N:
1641         case ASI_NL:
1642         case ASI_AIUP:
1643         case ASI_AIUPL:
1644         case ASI_AIUS:
1645         case ASI_AIUSL:
1646         case ASI_S:
1647         case ASI_SL:
1648         case ASI_P:
1649         case ASI_PL:
1650             type = GET_ASI_DIRECT;
1651             break;
1652         case ASI_TWINX_REAL:
1653         case ASI_TWINX_REAL_L:
1654         case ASI_TWINX_N:
1655         case ASI_TWINX_NL:
1656         case ASI_TWINX_AIUP:
1657         case ASI_TWINX_AIUP_L:
1658         case ASI_TWINX_AIUS:
1659         case ASI_TWINX_AIUS_L:
1660         case ASI_TWINX_P:
1661         case ASI_TWINX_PL:
1662         case ASI_TWINX_S:
1663         case ASI_TWINX_SL:
1664         case ASI_QUAD_LDD_PHYS:
1665         case ASI_QUAD_LDD_PHYS_L:
1666         case ASI_NUCLEUS_QUAD_LDD:
1667         case ASI_NUCLEUS_QUAD_LDD_L:
1668             type = GET_ASI_DTWINX;
1669             break;
1670         case ASI_BLK_COMMIT_P:
1671         case ASI_BLK_COMMIT_S:
1672         case ASI_BLK_AIUP_4V:
1673         case ASI_BLK_AIUP_L_4V:
1674         case ASI_BLK_AIUP:
1675         case ASI_BLK_AIUPL:
1676         case ASI_BLK_AIUS_4V:
1677         case ASI_BLK_AIUS_L_4V:
1678         case ASI_BLK_AIUS:
1679         case ASI_BLK_AIUSL:
1680         case ASI_BLK_S:
1681         case ASI_BLK_SL:
1682         case ASI_BLK_P:
1683         case ASI_BLK_PL:
1684             type = GET_ASI_BLOCK;
1685             break;
1686         case ASI_FL8_S:
1687         case ASI_FL8_SL:
1688         case ASI_FL8_P:
1689         case ASI_FL8_PL:
1690             memop = MO_UB;
1691             type = GET_ASI_SHORT;
1692             break;
1693         case ASI_FL16_S:
1694         case ASI_FL16_SL:
1695         case ASI_FL16_P:
1696         case ASI_FL16_PL:
1697             memop = MO_TEUW;
1698             type = GET_ASI_SHORT;
1699             break;
1700         }
1701         /* The little-endian asis all have bit 3 set.  */
1702         if (asi & 8) {
1703             memop ^= MO_BSWAP;
1704         }
1705     }
1706 #endif
1707 
1708  done:
1709     return (DisasASI){ type, asi, mem_idx, memop };
1710 }
1711 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * User-only sparc32 builds have no asi helpers; resolve_asi() has
 * rejected everything that would reach them.  These stubs keep the
 * call sites below compiling while asserting they are never executed.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1725 
/* Emit an integer load through a resolved ASI into DST. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Instruction-space loads go through a dedicated helper. */
            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; make pc/npc visible. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1771 
/* Emit an integer store of SRC through a resolved ASI. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte cache line. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; make pc/npc visible. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1850 
1851 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1852                          TCGv dst, TCGv src, TCGv addr)
1853 {
1854     switch (da->type) {
1855     case GET_ASI_EXCP:
1856         break;
1857     case GET_ASI_DIRECT:
1858         tcg_gen_atomic_xchg_tl(dst, addr, src,
1859                                da->mem_idx, da->memop | MO_ALIGN);
1860         break;
1861     default:
1862         /* ??? Should be DAE_invalid_asi.  */
1863         gen_exception(dc, TT_DATA_ACCESS);
1864         break;
1865     }
1866 }
1867 
1868 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1869                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1870 {
1871     switch (da->type) {
1872     case GET_ASI_EXCP:
1873         return;
1874     case GET_ASI_DIRECT:
1875         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1876                                   da->mem_idx, da->memop | MO_ALIGN);
1877         break;
1878     default:
1879         /* ??? Should be DAE_invalid_asi.  */
1880         gen_exception(dc, TT_DATA_ACCESS);
1881         break;
1882     }
1883 }
1884 
/* Emit LDSTUB(A): atomically load a byte and store 0xff in its place. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            /* Non-parallel: a non-atomic load + store is sufficient. */
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1919 
/*
 * Emit a floating-point load through an ASI (ldfa/lddfa/ldqfa).
 * ORIG_SIZE is the operand size encoded by the instruction; RD is the
 * destination FP register number.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        /* Only 4-byte alignment is architecturally required, even for
           the larger sizes. */
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Quad load as two 64-bit halves; see the size adjustment
               above. */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            /* 64-byte block load: eight doubles into rd .. rd+14. */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2040 
2041 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2042                         TCGv addr, int rd)
2043 {
2044     MemOp memop = da->memop;
2045     MemOp size = memop & MO_SIZE;
2046     TCGv_i32 d32;
2047     TCGv_i64 d64;
2048     TCGv addr_tmp;
2049 
2050     /* TODO: Use 128-bit load/store below. */
2051     if (size == MO_128) {
2052         memop = (memop & ~MO_SIZE) | MO_64;
2053     }
2054 
2055     switch (da->type) {
2056     case GET_ASI_EXCP:
2057         break;
2058 
2059     case GET_ASI_DIRECT:
2060         memop |= MO_ALIGN_4;
2061         switch (size) {
2062         case MO_32:
2063             d32 = gen_load_fpr_F(dc, rd);
2064             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2065             break;
2066         case MO_64:
2067             d64 = gen_load_fpr_D(dc, rd);
2068             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2069             break;
2070         case MO_128:
2071             /* Only 4-byte alignment required.  However, it is legal for the
2072                cpu to signal the alignment fault, and the OS trap handler is
2073                required to fix it up.  Requiring 16-byte alignment here avoids
2074                having to probe the second page before performing the first
2075                write.  */
2076             d64 = gen_load_fpr_D(dc, rd);
2077             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2078             addr_tmp = tcg_temp_new();
2079             tcg_gen_addi_tl(addr_tmp, addr, 8);
2080             d64 = gen_load_fpr_D(dc, rd + 2);
2081             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2082             break;
2083         default:
2084             g_assert_not_reached();
2085         }
2086         break;
2087 
2088     case GET_ASI_BLOCK:
2089         /* Valid for stdfa on aligned registers only.  */
2090         if (orig_size == MO_64 && (rd & 7) == 0) {
2091             /* The first operation checks required alignment.  */
2092             addr_tmp = tcg_temp_new();
2093             for (int i = 0; ; ++i) {
2094                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2095                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2096                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2097                 if (i == 7) {
2098                     break;
2099                 }
2100                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2101                 addr = addr_tmp;
2102             }
2103         } else {
2104             gen_exception(dc, TT_ILL_INSN);
2105         }
2106         break;
2107 
2108     case GET_ASI_SHORT:
2109         /* Valid for stdfa only.  */
2110         if (orig_size == MO_64) {
2111             d64 = gen_load_fpr_D(dc, rd);
2112             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2113         } else {
2114             gen_exception(dc, TT_ILL_INSN);
2115         }
2116         break;
2117 
2118     default:
2119         /* According to the table in the UA2011 manual, the only
2120            other asis that are valid for ldfa/lddfa/ldqfa are
2121            the PST* asis, which aren't currently handled.  */
2122         gen_exception(dc, TT_ILL_INSN);
2123         break;
2124     }
2125 }
2126 
/*
 * Emit LDDA: 64-bit (or 128-bit twinx) load into the even/odd register
 * pair rd/rd+1 through an ASI.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            /* Instruction-space access: go through the ld_code helper. */
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2222 
/*
 * Emit STDA: 64-bit (or 128-bit twinx) store of the even/odd register
 * pair rd/rd+1 through an ASI.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN)
;
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the pair into 128 bits, then store twice at the
               32-byte-aligned base. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2313 
/* Conditional move of a single-precision FP register: rd = cmp ? rs : rd. */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? s1 : s2 */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2339 
2340 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2341 {
2342 #ifdef TARGET_SPARC64
2343     TCGv_i64 dst = tcg_temp_new_i64();
2344     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2345                         gen_load_fpr_D(dc, rs),
2346                         gen_load_fpr_D(dc, rd));
2347     gen_store_fpr_D(dc, rd, dst);
2348 #else
2349     qemu_build_not_reached();
2350 #endif
2351 }
2352 
2353 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2354 {
2355 #ifdef TARGET_SPARC64
2356     TCGv c2 = tcg_constant_tl(cmp->c2);
2357     TCGv_i64 h = tcg_temp_new_i64();
2358     TCGv_i64 l = tcg_temp_new_i64();
2359 
2360     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2361                         gen_load_fpr_D(dc, rs),
2362                         gen_load_fpr_D(dc, rd));
2363     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2364                         gen_load_fpr_D(dc, rs + 2),
2365                         gen_load_fpr_D(dc, rd + 2));
2366     gen_store_fpr_D(dc, rd, h);
2367     gen_store_fpr_D(dc, rd + 2, l);
2368 #else
2369     qemu_build_not_reached();
2370 #endif
2371 }
2372 
#ifdef TARGET_SPARC64
/*
 * Compute a pointer to the trap_state entry for the current trap level:
 * r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2396 
2397 static int extract_dfpreg(DisasContext *dc, int x)
2398 {
2399     int r = x & 0x1e;
2400 #ifdef TARGET_SPARC64
2401     r |= (x & 1) << 5;
2402 #endif
2403     return r;
2404 }
2405 
2406 static int extract_qfpreg(DisasContext *dc, int x)
2407 {
2408     int r = x & 0x1c;
2409 #ifdef TARGET_SPARC64
2410     r |= (x & 1) << 5;
2411 #endif
2412     return r;
2413 }
2414 
2415 /* Include the auto-generated decoder.  */
2416 #include "decode-insns.c.inc"
2417 
/*
 * Glue a decodetree pattern NAME to its implementation FUNC, gated on
 * the availability predicate avail_AVAIL.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates: constant where the build configuration
 * decides, otherwise tested against the cpu's feature bits.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
# define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_IMA(C)     false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
# define avail_VIS4(C)    false
#endif
2458 
2459 /* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* npc holds one of the out-of-band marker values. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc only known at runtime: advance pc/npc in cpu state. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are statically known: step sequentially. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2499 
2500 /*
2501  * Major opcodes 00 and 01 -- branches, call, and sethi
2502  */
2503 
/*
 * Advance pc/npc for a conditional branch.  CMP is the branch condition,
 * ANNUL the annul bit, DISP the displacement in instruction words.
 * Always/never conditions are resolved at translation time.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Taken + annul: skip the delay slot entirely. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            /* Taken: delay slot executes, then the target. */
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc only known at runtime (out-of-band marker). */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            /* Not taken; annul additionally skips the delay slot. */
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional: the delay slot executes only when the
           branch is taken, so both outcomes end the TB here. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        /* Taken: execute the delay slot at npc, then jump to dest. */
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        /* Not taken: skip the (annulled) delay slot. */
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            /* npc only known at runtime: select the new npc with a
               movcond and continue. */
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision: record both targets and let the
               delay-slot insn resolve via JUMP_PC. */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2583 
/* Raise a privileged-instruction trap; always consumes the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2589 
/* Raise an fp exception with FTT set to "unimplemented FPop". */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2595 
2596 static bool gen_trap_float128(DisasContext *dc)
2597 {
2598     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2599         return false;
2600     }
2601     return raise_unimpfpop(dc);
2602 }
2603 
/* Conditional branch on integer condition codes (Bicc / BPcc). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2614 
/* Conditional branch on floating-point condition codes (FBfcc / FBPfcc). */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* FP branches trap if the FPU is disabled. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2628 
/* Branch on the contents of an integer register (BPr), v9 only. */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* gen_compare_reg rejects invalid cond encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}
2641 
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    /* Word displacement relative to the CALL instruction itself. */
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    /* CALL writes its own address into r15 (%o7). */
    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2651 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false lets the decoder fall through to illegal-insn. */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2665 
2666 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2667 {
2668     /* Special-case %g0 because that's the canonical nop.  */
2669     if (a->rd) {
2670         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2671     }
2672     return advance_pc(dc);
2673 }
2674 
2675 /*
2676  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2677  */
2678 
/*
 * Tcc: conditional software trap.  The trap number is rs1 plus either
 * an immediate or rs2, masked to the implementation's trap range and
 * offset by TT_TRAP.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode use the wider mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* Runtime trap number: rs1 (+ rs2 or imm), masked and offset. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Branch to a deferred exception block when the condition holds. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2731 
2732 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2733 {
2734     if (avail_32(dc) && a->cc) {
2735         return false;
2736     }
2737     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2738 }
2739 
2740 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2741 {
2742     if (avail_64(dc)) {
2743         return false;
2744     }
2745     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2746 }
2747 
2748 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2749 {
2750     if (avail_32(dc)) {
2751         return false;
2752     }
2753     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2754 }
2755 
/* STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2761 
/* MEMBAR (v9 only): memory barrier with separate mmask/cmask fields. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2777 
2778 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2779                           TCGv (*func)(DisasContext *, TCGv))
2780 {
2781     if (!priv) {
2782         return raise_priv(dc);
2783     }
2784     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2785     return advance_pc(dc);
2786 }
2787 
/* RDY: the %y register is held live in cpu_y. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2805 
/* Leon3 %asr17 (processor configuration register), via helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2813 
/* RDCCR: condition codes are materialized by a helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current ASI is a translation-time constant. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

/* RDTICK: read the tick timer through its helper. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Timer access counts as I/O; end the TB when requested. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc of this insn is a translation-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2855 
2856 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2857 {
2858     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2859     return dst;
2860 }
2861 
2862 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2863 
2864 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2865 {
2866     gen_trap_ifnofpu(dc);
2867     return cpu_gsr;
2868 }
2869 
2870 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2871 
/* RD %softint: sign-extended 32-bit load from env. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* RD %tick_cmpr: load the tick compare value from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/*
 * RD %stick: like do_rdtick, but reading the system tick timer.
 * An I/O operation, so possibly end the TB afterward.
 */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RD %stick_cmpr: load the system tick compare value from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2913 
2914 /*
2915  * UltraSPARC-T1 Strand status.
2916  * HYPV check maybe not enough, UA2005 & UA2007 describe
2917  * this ASR as impl. dep
2918  */
2919 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2920 {
2921     return tcg_constant_tl(1);
2922 }
2923 
2924 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2925 
2926 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2927 {
2928     gen_helper_rdpsr(dst, tcg_env);
2929     return dst;
2930 }
2931 
2932 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2933 
/* RDHPR %hpstate: load the hypervisor processor state register. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/*
 * RDHPR %htstate: load the hypervisor trap state for the current
 * trap level.  The entry is selected by (TL & MAXTL_MASK), scaled
 * by 8 bytes per entry (shli 3), added to the env base pointer.
 */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* RDHPR %hintp: load the hypervisor interrupt pending register. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* RDHPR %htba: load the hypervisor trap base address. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* RDHPR %hver: load the hypervisor version register. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* RDHPR %hstick_cmpr: load the hypervisor stick compare value. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2991 
/* RD %wim (v8): load the window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* RDPR %tpc: load the trap PC from the trap state at the current TL. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* RDPR %tnpc: load the trap NPC from the trap state at the current TL. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* RDPR %tstate: load the trap state word at the current TL. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* RDPR %tt: load the trap type (32-bit, sign-extended) at the current TL. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3060 
/* Trap base register: shared by v8 RDTBR and v9 RDPR %tba. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* RDPR %pstate: sign-extended 32-bit load from env. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* RDPR %tl: read the current trap level. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* RDPR %pil: read the processor interrupt level. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* RDPR %cwp: read the current window pointer via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* RDPR %cansave: register-window bookkeeping. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3108 
/* RDPR %canrestore: register-window bookkeeping. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* RDPR %cleanwin: register-window bookkeeping. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* RDPR %otherwin: register-window bookkeeping. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* RDPR %wstate: register-window state. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* RDPR %gl: read the global register level (UA2005). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* RDPR %ver: read the version register. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3166 
3167 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3168 {
3169     if (avail_64(dc)) {
3170         gen_helper_flushw(tcg_env);
3171         return advance_pc(dc);
3172     }
3173     return false;
3174 }
3175 
/*
 * Common translation for "write special register" insns.
 * Per the SPARC definition, the value written is rs1 ^ rs2
 * (or rs1 ^ simm13 for the immediate form).
 * @priv: pre-evaluated privilege predicate; when false, raise a
 *        privilege exception instead of translating.
 * @func: emits the actual register write from the computed value.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* With %g0 as rs1, the xor degenerates to a constant or a move. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3207 
/* WR %y: only the low 32 bits are kept (zero-extended). */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* WR %ccr (v9): unpack the value into the CC_* state via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* WR %asi (v9): only the low 8 bits are significant. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/*
 * WR %fprs: reset the translation-time dirty tracking and end the
 * TB, since FPRS controls FPU access for subsequent insns.
 */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* WR %gsr: check FPU availability, then write the GSR global. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* WR %softint_set: set bits in the soft interrupt register. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* WR %softint_clr: clear bits in the soft interrupt register. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* WR %softint: replace the soft interrupt register. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3275 
/*
 * WR %tick_cmpr: store the compare value and reprogram the timer
 * limit.  Timer access is an I/O operation.
 */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3289 
3290 static void do_wrstick(DisasContext *dc, TCGv src)
3291 {
3292 #ifdef TARGET_SPARC64
3293     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3294 
3295     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3296     translator_io_start(&dc->base);
3297     gen_helper_tick_set_count(r_tickptr, src);
3298     /* End TB to handle timer interrupt */
3299     dc->base.is_jmp = DISAS_EXIT;
3300 #else
3301     qemu_build_not_reached();
3302 #endif
3303 }
3304 
3305 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3306 
/*
 * WR %stick_cmpr: store the compare value and reprogram the system
 * tick timer limit.  Timer access is an I/O operation.
 */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

/*
 * Power-down (Leon3): the helper does not return to the TB, so the
 * full CPU state must be saved first.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/* WR %psr (v8 only): unpack via helper; end the TB to see its effects. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* WR %wim (v8): mask the value to the implemented window count. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3348 
/* WRPR %tpc: store into the trap state at the current TL. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* WRPR %tnpc: store into the trap state at the current TL. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* WRPR %tstate: store into the trap state at the current TL. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* WRPR %tt: store the trap type (32-bit field) at the current TL. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

/* WRPR %tick: set the tick counter; timer access is an I/O operation. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* WRPR %tba: write the trap base address global. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3424 
/*
 * WRPR %pstate: delegate to the helper; save state first and finish
 * with a dynamic npc so the helper's effects on execution state are
 * observed by subsequent insns.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* WRPR %tl: change the trap level; continue at a dynamic npc. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* WRPR %pil: set the interrupt level via helper (an I/O operation). */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* WRPR %cwp: change the current window pointer via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* WRPR %cansave: register-window bookkeeping. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* WRPR %canrestore: register-window bookkeeping. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* WRPR %cleanwin: register-window bookkeeping. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* WRPR %otherwin: register-window bookkeeping. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* WRPR %wstate: register-window state. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* WRPR %gl: change the global register level via helper (UA2005). */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3504 
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* v8 WRTBR shares the %tba writer with v9 WRPR %tba. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

/* WRHPR %hpstate: end the TB so the new state takes effect. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/*
 * WRHPR %htstate: store into the hypervisor trap state entry for
 * the current TL (index masked by MAXTL, 8 bytes per entry).
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* WRHPR %hintp: write the hypervisor interrupt pending register. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* WRHPR %htba: write the hypervisor trap base address. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/*
 * WRHPR %hstick_cmpr: store the compare value and reprogram the
 * hypervisor stick timer limit.  Timer access is an I/O operation.
 */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3567 
/*
 * SAVED / RESTORED (v9): privileged window-bookkeeping insns,
 * delegated entirely to the corresponding helpers.
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3583 
/* NOP: nothing to emit; just step past the insn. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3595 
/*
 * Common translation for two-operand arithmetic/logic insns.
 * @func:     emits the register-register form.
 * @funci:    emits the register-immediate form; NULL means use @func
 *            with a TCG constant instead.
 * @logic_cc: the insn sets flags as a logical op: compute the result
 *            directly into cpu_cc_N and derive Z from it, with C and
 *            V cleared.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute into CC_N so the flags fall out of the result. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Mirror the flags into the 32-bit icc copies as well. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    /* Note that dst may be cpu_cc_N; the result still reaches rd. */
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3638 
3639 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3640                      void (*func)(TCGv, TCGv, TCGv),
3641                      void (*funci)(TCGv, TCGv, target_long),
3642                      void (*func_cc)(TCGv, TCGv, TCGv))
3643 {
3644     if (a->cc) {
3645         return do_arith_int(dc, a, func_cc, NULL, false);
3646     }
3647     return do_arith_int(dc, a, func, funci, false);
3648 }
3649 
/* Logic wrapper: the cc bit selects logic-style flag computation. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

/* Add/subtract, with and without carry. */
TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

/* Tagged add/subtract; the TV forms trap on tag mismatch. */
TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

/* Bitwise logic. */
TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

/* Multiplication and division. */
TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3683 
3684 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3685 {
3686     /* OR with %g0 is the canonical alias for MOV. */
3687     if (!a->cc && a->rs1 == 0) {
3688         if (a->imm || a->rs2_or_imm == 0) {
3689             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3690         } else if (a->rs2_or_imm & ~0x1f) {
3691             /* For simplicity, we under-decoded the rs2 form. */
3692             return false;
3693         } else {
3694             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3695         }
3696         return advance_pc(dc);
3697     }
3698     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3699 }
3700 
/*
 * UDIV (v8): unsigned divide of the 64-bit value Y:rs1 by the low
 * 32 bits of rs2, with the result clamped to UINT32_MAX.
 * Division by zero raises TT_DIV_ZERO -- immediately for a zero
 * immediate, via a delayed exception for the register form.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime check of the divisor's low 32 bits against zero. */
        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Dividend is rs1 in the low half, Y in the high half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Clamp the quotient on overflow. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3753 
/*
 * UDIVX (v9): 64-bit unsigned divide.  Division by zero raises
 * TT_DIV_ZERO -- immediately for a zero immediate, via a delayed
 * exception for the register form.
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3791 
/*
 * SDIVX (v9): 64-bit signed divide.  Division by zero raises
 * TT_DIV_ZERO; the INT64_MIN / -1 case (which would trap on some
 * hosts) is special-cased for both operand forms.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x, which also covers INT64_MIN / -1. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3848 
/*
 * VIS EDGE* insns: compute a partial-store edge mask from the two
 * addresses in rs1 (left) and rs2 (right).
 * @width selects EDGE8/16/32, @cc additionally sets the condition
 * codes from s1 - s2, @little_endian selects the *L variants.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /*
     * Extract the element index within the 8-byte word from each
     * address; r is bit-inverted so that shifting m by it produces
     * the right-edge mask.  m is the full mask for this width.
     */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /*
     * Compute dst = (s1 == s2 under amask ? l & r : l).
     * TSTEQ is true when ((s1 ^ s2) & amask) == 0, i.e. when both
     * addresses fall in the same aligned 8-byte word; only then do
     * both edge masks apply.
     */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3927 
3928 static bool do_rr(DisasContext *dc, arg_r_r *a,
3929                   void (*func)(TCGv, TCGv))
3930 {
3931     TCGv dst = gen_dest_gpr(dc, a->rd);
3932     TCGv src = gen_load_gpr(dc, a->rs);
3933 
3934     func(dst, src);
3935     gen_store_gpr(dc, a->rd, dst);
3936     return advance_pc(dc);
3937 }
3938 
3939 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3940 
3941 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3942                    void (*func)(TCGv, TCGv, TCGv))
3943 {
3944     TCGv dst = gen_dest_gpr(dc, a->rd);
3945     TCGv src1 = gen_load_gpr(dc, a->rs1);
3946     TCGv src2 = gen_load_gpr(dc, a->rs2);
3947 
3948     func(dst, src1, src2);
3949     gen_store_gpr(dc, a->rd, dst);
3950     return advance_pc(dc);
3951 }
3952 
3953 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3954 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3955 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3956 
3957 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3958 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3959 
3960 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
3961 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low 3 bits of the sum
 * deposited into GSR.align for use by subsequent FALIGNDATA.
 * Sparc64 only; the decode never reaches here on sparc32.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
3974 
/*
 * ALIGNADDRESS_LITTLE: as gen_op_alignaddr, but GSR.align receives the
 * negated low bits of the sum (little-endian alignment direction).
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Negate before depositing: only bits [2:0] of -tmp are kept. */
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3991 
/*
 * BMASK: dst = s1 + s2, and the low 32 bits of the sum are stored into
 * GSR.mask (bits [63:32]) for use by subsequent BSHUFFLE.
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4003 
4004 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4005 {
4006     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4007     return true;
4008 }
4009 
4010 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4011 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4012 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4013 
/*
 * Shift-by-register: rd = rs1 shifted by (rs2 & count-mask).
 * L selects left shift, U selects logical (vs arithmetic) right shift.
 * a->x marks the 64-bit "X" form, valid only on sparc64.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* The architecture only uses the low 5 (or 6, for X) count bits. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit result zero-extended into the 64-bit register.  */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend the source so high garbage can't shift in. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend so the arithmetic shift replicates bit 31. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4053 
/*
 * Shift-by-immediate: rd = rs1 shifted by the constant a->i.
 * L selects left shift, U selects logical (vs arithmetic) right shift.
 * On sparc64 the 32-bit forms are implemented with deposit/extract,
 * which fold the shift and the 32-bit extension into one TCG op.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-register shift: sparc32 registers, or the 64-bit X form. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 32-bit form on sparc64: operate on the low word only. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4090 
4091 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4092 {
4093     /* For simplicity, we under-decoded the rs2 form. */
4094     if (!imm && rs2_or_imm & ~0x1f) {
4095         return NULL;
4096     }
4097     if (imm || rs2_or_imm == 0) {
4098         return tcg_constant_tl(rs2_or_imm);
4099     } else {
4100         return cpu_regs[rs2_or_imm];
4101     }
4102 }
4103 
/*
 * Shared tail of MOVcc/MOVfcc/MOVR: rd = (cmp holds ? src2 : rd).
 * Note rd is loaded (not gen_dest_gpr) so the movcond can preserve
 * the old value when the condition is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4113 
4114 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4115 {
4116     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4117     DisasCompare cmp;
4118 
4119     if (src2 == NULL) {
4120         return false;
4121     }
4122     gen_compare(&cmp, a->cc, a->cond, dc);
4123     return do_mov_cond(dc, &cmp, a->rd, src2);
4124 }
4125 
4126 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4127 {
4128     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4129     DisasCompare cmp;
4130 
4131     if (src2 == NULL) {
4132         return false;
4133     }
4134     gen_fcompare(&cmp, a->cc, a->cond);
4135     return do_mov_cond(dc, &cmp, a->rd, src2);
4136 }
4137 
/*
 * MOVR: conditional register move based on a register-vs-zero test
 * of rs1.  gen_compare_reg rejects the reserved condition encodings.
 */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4151 
/*
 * Compute rs1 + (rs2 or simm) into a fresh temporary and hand the sum
 * to FUNC.  Used by JMPL/RETT/RETURN/SAVE/RESTORE, which all consume
 * an effective-address-style sum.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4176 
/*
 * JMPL: rd = pc of this insn; npc = src (alignment-checked and
 * address-masked).  Ends the TB with a dynamic lookup.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4197 
/*
 * RETT (sparc32 only): return from trap.  Privileged; jumps to src
 * and lets the helper handle the PSR/window state transition.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4215 
/*
 * RETURN (sparc64 only): restore the register window, then jump to
 * src.  The alignment check is emitted before the window restore.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4230 
/*
 * SAVE: rotate to a new register window, then write the pre-computed
 * sum (captured before the window change) into rd of the new window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4239 
/*
 * RESTORE: rotate back to the previous register window, then write
 * the pre-computed sum into rd of the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4248 
/*
 * DONE/RETRY (sparc64 only): privileged return from trap state.
 * The helpers rewrite pc/npc from the trap-state registers, so both
 * become dynamic and the TB ends here.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* The helpers may touch timers/interrupt state; start io first. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4267 
4268 /*
4269  * Major opcode 11 -- load and store instructions
4270  */
4271 
/*
 * Compute the effective address rs1 + (rs2 or simm13) for a load or
 * store.  Returns NULL when the under-decoded rs2 field is invalid.
 * When the 32-bit address mask applies (AM_CHECK), the result is
 * zero-extended to 32 bits.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Reuse the temp from above if we already made one. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4300 
/*
 * Integer load (optionally with explicit ASI): rd = mem[ea] with the
 * given memory operation size/sign.
 */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4324 
/*
 * Integer store (optionally with explicit ASI): mem[ea] = rd with the
 * given memory operation size.
 */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4344 
/*
 * LDD: load doubleword into the even/odd register pair rd, rd+1.
 * An odd rd is an illegal encoding.
 */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4361 
4362 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4363 {
4364     TCGv addr;
4365     DisasASI da;
4366 
4367     if (a->rd & 1) {
4368         return false;
4369     }
4370     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4371     if (addr == NULL) {
4372         return false;
4373     }
4374     da = resolve_asi(dc, a->asi, MO_TEUQ);
4375     gen_stda_asi(dc, &da, addr, a->rd);
4376     return advance_pc(dc);
4377 }
4378 
/*
 * LDSTUB: atomically load a byte into rd and store 0xff to memory.
 */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4395 
/*
 * SWAP: exchange the 32-bit value in rd with the word at the effective
 * address.  rd is both source (old register value) and destination
 * (old memory value).
 */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4413 
/*
 * CASA/CASXA: compare-and-swap.  The address is rs1 alone (encoded as
 * the immediate form with offset 0); rs2 holds the comparison value,
 * rd holds the new value and receives the old memory value.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4435 
/*
 * Floating-point load of size SZ (32/64/128 bits) into fp register rd.
 * Traps if the FPU is disabled, and for 128-bit ops if quad support
 * is unavailable.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4463 
/*
 * Floating-point store of size SZ (32/64/128 bits) from fp register rd.
 * Traps if the FPU is disabled, and for 128-bit ops if quad support
 * is unavailable.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4490 
/*
 * STDFQ (sparc32 only, privileged): QEMU does not model a floating
 * point deferred-trap queue, so always raise a sequence-error fp
 * exception instead of storing.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4505 
/*
 * LDFSR: load the 32-bit FSR from memory.  fcc0 is unpacked into its
 * dedicated TCG global; the remaining fields go through the helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4527 
/*
 * LDXFSR / LDXEFSR: load the full 64-bit FSR.  ENTIRE selects whether
 * the FTT field is also written (LDXEFSR) or preserved (LDXFSR).
 * All four fcc fields are unpacked into their TCG globals.
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /*
     * cpu_fcc[3] doubles as scratch for the high half; it is extracted
     * from itself last, so the aliasing is harmless.
     */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4566 
/*
 * STFSR/STXFSR: reassemble the FSR via the helper and store it as a
 * 32-bit (sparc32) or 64-bit (sparc64) value.
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4587 
/*
 * Store the 32-bit constant C into fp register rd (FZEROs/FONEs).
 */
static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4599 
/*
 * Store the 64-bit constant C into fp register rd (FZEROd/FONEd).
 */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4611 
4612 static bool do_ff(DisasContext *dc, arg_r_r *a,
4613                   void (*func)(TCGv_i32, TCGv_i32))
4614 {
4615     TCGv_i32 tmp;
4616 
4617     if (gen_trap_ifnofpu(dc)) {
4618         return true;
4619     }
4620 
4621     tmp = gen_load_fpr_F(dc, a->rs);
4622     func(tmp, tmp);
4623     gen_store_fpr_F(dc, a->rd, tmp);
4624     return advance_pc(dc);
4625 }
4626 
4627 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4628 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4629 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4630 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4631 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4632 
/*
 * Common translator for 64-bit -> 32-bit fp ops: frd(32) = func(frs(64)).
 * Used by the VIS pack operations.
 */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4652 
4653 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4654                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4655 {
4656     TCGv_i32 tmp;
4657 
4658     if (gen_trap_ifnofpu(dc)) {
4659         return true;
4660     }
4661 
4662     tmp = gen_load_fpr_F(dc, a->rs);
4663     func(tmp, tcg_env, tmp);
4664     gen_store_fpr_F(dc, a->rd, tmp);
4665     return advance_pc(dc);
4666 }
4667 
4668 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4669 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4670 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4671 
/*
 * Common translator for 64-bit -> 32-bit fp ops that need the cpu env:
 * frd(32) = func(env, frs(64)).
 */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4692 
4693 static bool do_dd(DisasContext *dc, arg_r_r *a,
4694                   void (*func)(TCGv_i64, TCGv_i64))
4695 {
4696     TCGv_i64 dst, src;
4697 
4698     if (gen_trap_ifnofpu(dc)) {
4699         return true;
4700     }
4701 
4702     dst = tcg_temp_new_i64();
4703     src = gen_load_fpr_D(dc, a->rs);
4704     func(dst, src);
4705     gen_store_fpr_D(dc, a->rd, dst);
4706     return advance_pc(dc);
4707 }
4708 
4709 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4710 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4711 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4712 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4713 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4714 
/*
 * Common translator for unary 64-bit fp ops that need the cpu env
 * (may raise fp exceptions): frd = func(env, frs).
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4734 
/*
 * Common translator for 32-bit -> 64-bit fp ops without env:
 * frd(64) = func(frs(32)).
 */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4753 
/*
 * Common translator for 32-bit -> 64-bit fp ops that need the cpu env:
 * frd(64) = func(env, frs(32)).
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4774 
/*
 * Common translator for unary 128-bit fp ops without env:
 * qrd = func(qrs).  These never raise IEEE exceptions, so the
 * accrued-exception/FTT state is cleared directly.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4797 
/*
 * Common translator for unary 128-bit fp ops that need the cpu env:
 * qrd = func(env, qrs).
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4817 
/*
 * Common translator for 128-bit -> 32-bit fp ops that need the cpu env:
 * frd(32) = func(env, qrs).
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4840 
/*
 * Common translator for 128-bit -> 64-bit fp ops that need the cpu env:
 * frd(64) = func(env, qrs).
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4863 
/*
 * Common translator for 32-bit -> 128-bit fp ops that need the cpu env:
 * qrd = func(env, frs(32)).
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4886 
/*
 * Common translator for 64-bit -> 128-bit fp ops that need the cpu env:
 * qrd = func(env, frs(64)).
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4909 
/*
 * Common translator for binary 32-bit fp ops without env:
 * frd = func(frs1, frs2).  The rs1 temporary is reused as destination.
 */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4947 
/*
 * Single-precision two-operand op through an env helper (the helper
 * takes tcg_env, so it can raise IEEE FP exceptions):
 * Ffrd = func(env, Ffrs1, Ffrs2).
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    /* src1 is reused as the destination temporary. */
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4970 
/*
 * Two single-precision sources producing a double-reg result:
 * Dfrd = func(Ffrs1, Ffrs2).  Used by the VIS1 8x16 multiply
 * variants and FPMERGE below.
 */
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4994 
/*
 * Mixed-width sources producing a double-reg result:
 * Dfrd = func(Ffrs1, Dfrs2).
 */
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5014 
/*
 * Double-reg op expanded with the generic vector infrastructure.
 * The 8-byte double FPR is treated as a vector of vece-sized lanes;
 * oprsz = maxsz = 8 bytes.  func receives env offsets of rd/rs1/rs2.
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

/* Signed-saturating partitioned add/sub (gvec ssadd/sssub). */
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)

/* Per-lane variable shifts (shift counts come from rs2 lanes). */
TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5046 
5047 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5048                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5049 {
5050     TCGv_i64 dst, src1, src2;
5051 
5052     if (gen_trap_ifnofpu(dc)) {
5053         return true;
5054     }
5055 
5056     dst = tcg_temp_new_i64();
5057     src1 = gen_load_fpr_D(dc, a->rs1);
5058     src2 = gen_load_fpr_D(dc, a->rs2);
5059     func(dst, src1, src2);
5060     gen_store_fpr_D(dc, a->rd, dst);
5061     return advance_pc(dc);
5062 }
5063 
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

/* VIS1 bitwise ops on double FPRs — 64-bit mirror of the *s forms. */
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

/* 64-bit partitioned add/sub is just plain i64 arithmetic. */
TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5088 
/*
 * Two double-reg FP sources producing a general (integer) register
 * result: Grd = func(Dfrs1, Dfrs2).  Used by the VIS partitioned
 * compares, whose results are bitmasks in a GPR.
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
/* XMULX/XMULXHI operate purely on GPRs, hence do_rrr (defined above). */
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5125 
/*
 * Double-precision two-operand op through an env helper (the helper
 * takes tcg_env, so it can raise IEEE FP exceptions):
 * Dfrd = func(env, Dfrs1, Dfrs2).
 */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5149 
/*
 * FsMULd: Dfrd = frs1 * frs2 with single-precision sources and a
 * double-precision result.  Note the ordering: the no-FPU trap is
 * checked first; only then does a missing FSMULD CPU feature raise
 * an unimplemented-FPop trap.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5169 
5170 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5171 {
5172     TCGv_i64 dst;
5173     TCGv_i32 src1, src2;
5174 
5175     if (!avail_VIS3(dc)) {
5176         return false;
5177     }
5178     if (gen_trap_ifnofpu(dc)) {
5179         return true;
5180     }
5181     dst = tcg_temp_new_i64();
5182     src1 = gen_load_fpr_F(dc, a->rs1);
5183     src2 = gen_load_fpr_F(dc, a->rs2);
5184     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5185     gen_store_fpr_D(dc, a->rd, dst);
5186     return advance_pc(dc);
5187 }
5188 
/*
 * Single-precision three-operand (fused multiply-add family) op:
 * Ffrd = func(Ffrs1, Ffrs2, Ffrs3).
 */
static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    src3 = gen_load_fpr_F(dc, a->rs3);
    dst = tcg_temp_new_i32();
    func(dst, src1, src2, src3);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5211 
/*
 * Double-reg three-operand op (PDIST and the FMA family):
 * Dfrd = func(Dfrs1, Dfrs2, Dfrs3).
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5237 
/*
 * FALIGNDATAi (VIS4).  Unusual operand mix: the FP source operands
 * are Dfrd (the destination register is also a source) and Dfrs2,
 * while rs1 names a GPR supplying the third (integer) operand.
 */
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);   /* note: rd, not rs1 */
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5258 
/*
 * Quad-precision two-operand op through an env helper:
 * Qfrd = func(env, Qfrs1, Qfrs2).  Traps when the FPU is disabled
 * or float128 insns are unavailable.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    /* src1 is reused as the destination temporary. */
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5282 
5283 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5284 {
5285     TCGv_i64 src1, src2;
5286     TCGv_i128 dst;
5287 
5288     if (gen_trap_ifnofpu(dc)) {
5289         return true;
5290     }
5291     if (gen_trap_float128(dc)) {
5292         return true;
5293     }
5294 
5295     src1 = gen_load_fpr_D(dc, a->rs1);
5296     src2 = gen_load_fpr_D(dc, a->rs2);
5297     dst = tcg_temp_new_i128();
5298     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5299     gen_store_fpr_Q(dc, a->rd, dst);
5300     return advance_pc(dc);
5301 }
5302 
/*
 * FMOVR: conditional FP move on an integer register condition.
 * gen_compare_reg returning false means the cond encoding is
 * invalid -> undecoded (illegal insn).  func performs the actual
 * width-specific move (gen_fmovs/gen_fmovd/gen_fmovq).
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5326 
/*
 * FMOVcc: conditional FP move on %icc/%xcc (integer condition
 * codes, evaluated by gen_compare).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5348 
/*
 * FMOVfcc: conditional FP move on a floating-point condition field
 * (%fccN, evaluated by gen_fcompare).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5370 
/*
 * FCMPs/FCMPEs: single-precision compare writing %fccN.  The 'e'
 * (exception) variant uses gen_helper_fcmpes.  sparc32 only has a
 * single fcc field (see the r32[] table in sparc_tcg_init), so any
 * cc != 0 is an illegal encoding there.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5394 
/*
 * FCMPd/FCMPEd: double-precision compare writing %fccN.
 * Same cc restriction on sparc32 as do_fcmps.
 */
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5418 
/*
 * FCMPq/FCMPEq: quad-precision compare writing %fccN.  Adds the
 * float128-availability trap on top of the do_fcmps checks.
 */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5445 
5446 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5447 {
5448     TCGv_i32 src1, src2;
5449 
5450     if (!avail_VIS3(dc)) {
5451         return false;
5452     }
5453     if (gen_trap_ifnofpu(dc)) {
5454         return true;
5455     }
5456 
5457     src1 = gen_load_fpr_F(dc, a->rs1);
5458     src2 = gen_load_fpr_F(dc, a->rs2);
5459     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5460     return advance_pc(dc);
5461 }
5462 
/* FLCMPd (VIS3): double-precision counterpart of FLCMPs. */
static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}
5479 
/*
 * FPR -> GPR move (VIS3B MOVsTO*/MOVdTOx).  'offset' maps the FPR
 * number to its env offset; 'load' is the width/sign-specific
 * tcg_gen_ld* that reads it into the destination GPR.
 */
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5498 
/*
 * GPR -> FPR move (VIS3B MOVwTOs/MOVxTOd): the mirror of do_movf2r,
 * storing the GPR value directly at the FPR's env offset.
 */
static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5515 
/*
 * Translator hook: set up the DisasContext from the TB.  The npc is
 * carried in tb->cs_base, and the remaining per-TB state is packed
 * into tb->flags (mmu index, fpu-enable, address-mask, privilege
 * bits, and on sparc64 the default ASI).
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5544 
/* Translator hook: nothing to emit at the start of a TB for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5548 
/*
 * Translator hook: record (pc, npc) for this insn so that
 * sparc_restore_state_to_opc can rebuild the CPU state.  An npc
 * with low bits set is a symbolic marker rather than an address;
 * normalize it to the form the restore code expects.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Branch not yet resolved: encode the taken target with
               the JUMP_PC marker; the fallthrough must be pc + 4. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5570 
/*
 * Translator hook: fetch and decode one 4-byte insn.  A failed
 * decode raises TT_ILL_INSN.  Translation stops early (TOO_MANY)
 * when dc->pc diverges from the sequential pc_next, i.e. after a
 * branch/delay-slot sequence.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5590 
/*
 * Translator hook: close out the TB.  pc/npc values with low bits
 * set are symbolic markers (DYNAMIC_PC, DYNAMIC_PC_LOOKUP, JUMP_PC)
 * meaning the real value is already in cpu_pc/cpu_npc or must be
 * computed; only fully static pc+npc pairs may goto_tb-chain.
 * Finally, emit the code for any delayed exceptions queued during
 * translation (each behind its own label).
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* DYNAMIC_PC forbids the TB-lookup exit; DYNAMIC_PC_LOOKUP
           allows it; a static value is stored explicitly. */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Unresolved conditional branch: materialize npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line code for queued delayed exceptions. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* npc with low bits set is symbolic: cpu_npc already set. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5672 
/* Hook table consumed by translator_loop() below. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5680 
/* Entry point: translate one TB by running the generic translator
   loop with the SPARC hook table and a zero-initialized context. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5688 
/*
 * One-time registration of the TCG global variables backing CPU
 * state.  sparc64 exposes four fcc fields plus fprs and the extra
 * xcc flags/gsr; sparc32 has a single fcc.  %g0 is hardwired zero
 * and gets no global; %g1-%g7 live in env.gregs, while the window
 * registers %o/%l/%i are addressed indirectly through regwptr.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 reads as zero and is never written: no backing global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers are based off regwptr, not env directly. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5754 
/*
 * Rebuild pc/npc from the per-insn data recorded by
 * sparc_tr_insn_start: data[0] is pc, data[1] is npc, which may be
 * the DYNAMIC_PC marker or a branch target tagged with JUMP_PC.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5777