xref: /openbmc/qemu/target/sparc/translate.c (revision 4fd71d19)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
84 # define MAXTL_MASK                             0
85 #endif
86 
87 /* Dynamic PC, must exit to main loop. */
88 #define DYNAMIC_PC         1
89 /* Dynamic PC, one of two values according to jump_pc[T2]. */
90 #define JUMP_PC            2
91 /* Dynamic PC, may lookup next TB. */
92 #define DYNAMIC_PC_LOOKUP  3
93 
94 #define DISAS_EXIT  DISAS_TARGET_0
95 
96 /* global register indexes */
97 static TCGv_ptr cpu_regwptr;
98 static TCGv cpu_pc, cpu_npc;
99 static TCGv cpu_regs[32];
100 static TCGv cpu_y;
101 static TCGv cpu_tbr;
102 static TCGv cpu_cond;
103 static TCGv cpu_cc_N;
104 static TCGv cpu_cc_V;
105 static TCGv cpu_icc_Z;
106 static TCGv cpu_icc_C;
107 #ifdef TARGET_SPARC64
108 static TCGv cpu_xcc_Z;
109 static TCGv cpu_xcc_C;
110 static TCGv_i32 cpu_fprs;
111 static TCGv cpu_gsr;
112 #else
113 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
114 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
115 #endif
116 
117 #ifdef TARGET_SPARC64
118 #define cpu_cc_Z  cpu_xcc_Z
119 #define cpu_cc_C  cpu_xcc_C
120 #else
121 #define cpu_cc_Z  cpu_icc_Z
122 #define cpu_cc_C  cpu_icc_C
123 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
124 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
125 #endif
126 
127 /* Floating point comparison registers */
128 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
129 
130 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
131 #ifdef TARGET_SPARC64
132 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
133 # define env64_field_offsetof(X)  env_field_offsetof(X)
134 #else
135 # define env32_field_offsetof(X)  env_field_offsetof(X)
136 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
137 #endif
138 
/*
 * A comparison for a conditional branch/move: "c1 <cond> c2",
 * with c2 restricted to a small immediate (usually 0).
 */
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

/*
 * An exception to be raised later, recorded while translating the
 * current insn; nodes are chained onto dc->delay_excp_list
 * (see delay_exceptionv).
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
153 
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;     /* cpu_cond holds a value; see finishing_insn() */
    bool fpu_enabled;
    bool address_mask_32bit;   /* truncate addresses to 32 bits (AM_CHECK) */
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;     /* FPRS dirty bits already set within this TB */
    int asi;
#endif
    /* Pending delayed exceptions; see delay_exceptionv(). */
    DisasDelayException *delay_excp_list;
} DisasContext;
181 
182 // This function uses non-native bit order
183 #define GET_FIELD(X, FROM, TO)                                  \
184     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
185 
186 // This function uses the order in the manuals, i.e. bit 0 is 2^0
187 #define GET_FIELD_SP(X, FROM, TO)               \
188     GET_FIELD(X, 31 - (TO), 31 - (FROM))
189 
190 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
191 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
192 
193 #define UA2005_HTRAP_MASK 0xff
194 #define V8_TRAP_MASK 0x7f
195 
196 #define IS_IMM (insn & (1<<13))
197 
/*
 * Record in env->fprs that FP register 'rd' has been modified:
 * bit 0 for f0..f31, bit 1 for f32..f63 (presumably FPRS.DL/DU —
 * confirm against the sparc64 FPRS definition).  No-op on sparc32.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
210 
211 /* floating point registers moves */
212 
213 static int gen_offset_fpr_F(unsigned int reg)
214 {
215     int ret;
216 
217     tcg_debug_assert(reg < 32);
218     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
219     if (reg & 1) {
220         ret += offsetof(CPU_DoubleU, l.lower);
221     } else {
222         ret += offsetof(CPU_DoubleU, l.upper);
223     }
224     return ret;
225 }
226 
/* Load single-precision FP register 'src' into a fresh i32 temp. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

/* Store 'v' into single-precision FP register 'dst'; marks FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

/* Return the env offset of double-precision FP register 'reg' (even only). */
static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

/* Load double-precision FP register 'src' into a fresh i64 temp. */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

/* Store 'v' into double-precision FP register 'dst'; marks FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}
259 
/*
 * Load quad-precision FP register 'src' as an i128: the first double
 * (src) is the high half, the second (src + 2) the low half.
 */
static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

/* Store i128 'v' into quad-precision FP register 'dst' (two doubles). */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
279 
280 /* moves */
281 #ifdef CONFIG_USER_ONLY
282 #define supervisor(dc) 0
283 #define hypervisor(dc) 0
284 #else
285 #ifdef TARGET_SPARC64
286 #define hypervisor(dc) (dc->hypervisor)
287 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
288 #else
289 #define supervisor(dc) (dc->supervisor)
290 #define hypervisor(dc) 0
291 #endif
292 #endif
293 
294 #if !defined(TARGET_SPARC64)
295 # define AM_CHECK(dc)  false
296 #elif defined(TARGET_ABI32)
297 # define AM_CHECK(dc)  true
298 #elif defined(CONFIG_USER_ONLY)
299 # define AM_CHECK(dc)  false
300 #else
301 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
302 #endif
303 
/* Truncate 'addr' to 32 bits in place when address masking applies. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
310 
311 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
312 {
313     return AM_CHECK(dc) ? (uint32_t)addr : addr;
314 }
315 
316 static TCGv gen_load_gpr(DisasContext *dc, int reg)
317 {
318     if (reg > 0) {
319         assert(reg < 32);
320         return cpu_regs[reg];
321     } else {
322         TCGv t = tcg_temp_new();
323         tcg_gen_movi_tl(t, 0);
324         return t;
325     }
326 }
327 
328 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
329 {
330     if (reg > 0) {
331         assert(reg < 32);
332         tcg_gen_mov_tl(cpu_regs[reg], v);
333     }
334 }
335 
336 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
337 {
338     if (reg > 0) {
339         assert(reg < 32);
340         return cpu_regs[reg];
341     } else {
342         return tcg_temp_new();
343     }
344 }
345 
/* True if both pc and npc are reachable via direct TB chaining. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

/* End the TB, transferring control to (pc, npc); chain when possible. */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
368 
/*
 * Return the 32-bit (icc) carry as a 0/1 value.  On a 64-bit target
 * the carry out of bit 31 is stored in bit 32 of cpu_icc_C, so it
 * must be extracted; on a 32-bit target cpu_icc_C already holds 0/1.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
378 
/*
 * dst = src1 + src2 (+ cin), computing all condition-code fields.
 * 'cin', if non-NULL, is a 0/1 carry-in.  Note that cpu_cc_Z is used
 * as scratch for the src1^src2 term before receiving the result copy
 * at the end.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* V = (N ^ src2) & ~(src1 ^ src2): overflow iff the operands had
       the same sign and the result sign differs. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* ADDcc: add, setting condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

/* TADDcc: tagged add; tag bits (1:0) of either operand raise icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* ADDC/ADDX: add with 32-bit carry-in, without setting condition codes. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* ADDCcc/ADDXcc: add with 32-bit carry-in, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
435 
436 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
437 {
438     TCGv z = tcg_constant_tl(0);
439 
440     if (cin) {
441         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
442         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
443     } else {
444         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
445     }
446     tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
447     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
448     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
449     tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
450 #ifdef TARGET_SPARC64
451     tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
452     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
453 #endif
454     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
455     tcg_gen_mov_tl(dst, cpu_cc_N);
456 }
457 
/* SUBcc: subtract, setting condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

/* TSUBcc: tagged subtract; tag bits (1:0) of either operand raise icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* SUBC/SUBX: subtract with 32-bit borrow-in, no condition codes. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* SUBCcc/SUBXcc: subtract with 32-bit borrow-in, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
489 
/*
 * MULScc: one step of the V8 multiply-step algorithm.
 * Conditionally adds src2 (only when Y bit 0 is set), shifts the low
 * bit of src1 into the top of Y, and feeds (N^V):(src1 >> 1) plus the
 * (possibly zeroed) src2 through gen_op_addcc to set the flags.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
526 
/*
 * 32x32 -> 64-bit multiply.  On a 32-bit target the low 32 bits of
 * the product go to dst and the high 32 bits to Y; on a 64-bit target
 * dst receives the full 64-bit product and Y the high 32 bits.
 * sign_ext selects signed vs unsigned extension of the inputs.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

/* UMUL: unsigned 32x32 multiply. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL: signed 32x32 multiply. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
563 
/*
 * SDIV: signed divide via helper; dst gets the sign-extended 32-bit
 * quotient from the low half of the helper's 64-bit result.  The
 * helper takes tcg_env and so may raise (e.g. division by zero) —
 * see its definition for exact semantics.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
575 
/*
 * UDIVcc: unsigned divide, setting condition codes.  The helper's
 * 64-bit result carries the quotient in bits 31:0; the high 32 bits
 * feed cc_V (overflow indication from the helper).  C is cleared, and
 * N/Z follow the quotient.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* On sparc64 cc_V is itself 64 bits wide; use it directly. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* SDIVcc: as gen_op_udivcc, but signed (quotient sign-extended for N). */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
625 
/* TADDccTV: tagged add, trap variant — implemented entirely in the helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* TSUBccTV: tagged subtract, trap variant — implemented in the helper. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* POPC: population count of src2; src1 is intentionally unused here. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
/* Stub so the array16/array32 wrappers compile on sparc32; never reached. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* ARRAY16: the ARRAY8 address, scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: the ARRAY8 address, scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
659 
/* FPACK16 (sparc64 only): pack via helper, using GSR for scaling state. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACKFIX (sparc64 only): pack via helper, using GSR for scaling state. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACK32 (sparc64 only): pack via helper, using GSR for scaling state. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/*
 * FALIGNDATA (sparc64 only): concatenate s1:s2 and extract 8 bytes
 * starting at the byte offset held in GSR.align (low 3 bits of GSR).
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

/* BSHUFFLE (sparc64 only): byte shuffle via helper, mask taken from GSR. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
722 
/*
 * FMUL8x16AL: multiply by the low 16-bit half of src2.
 * Note: clobbers src2 (sign-extends it in place) before the helper call.
 */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

/*
 * FMUL8x16AU: multiply by the high 16-bit half of src2.
 * Note: clobbers src2 (arithmetic-shifts it in place) before the call.
 */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

/*
 * FMULD8ULX16: multiply the unsigned low byte of each 16-bit half of
 * src1 by the corresponding signed 16-bit half of src2, producing two
 * 32-bit products packed into dst.
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

/*
 * FMULD8SUX16: multiply the signed high byte of each 16-bit half of
 * src1 by the corresponding signed 16-bit half of src2, producing two
 * 32-bit products packed into dst.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
776 
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

/*
 * Resolve a pending conditional branch:
 * npc = jump.cond(c1, c2) ? jump_pc[0] : jump_pc[1].
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
798 
/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

/* Materialize dc->npc into cpu_npc, resolving a pending JUMP_PC first. */
static void save_npc(DisasContext *dc)
{
    /* Low bits set means a symbolic npc (DYNAMIC_PC/JUMP_PC/...). */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the correct value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

/* Materialize the full pc/npc state, e.g. before raising an exception. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
833 
/* Raise trap 'which' at the current pc/npc; ends the TB (NORETURN). */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
841 
842 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
843 {
844     DisasDelayException *e = g_new0(DisasDelayException, 1);
845 
846     e->next = dc->delay_excp_list;
847     dc->delay_excp_list = e;
848 
849     e->lab = gen_new_label();
850     e->excp = excp;
851     e->pc = dc->pc;
852     /* Caller must have used flush_cond before branch. */
853     assert(e->npc != JUMP_PC);
854     e->npc = dc->npc;
855 
856     return e->lab;
857 }
858 
/* Convenience wrapper: delayed exception with a constant trap number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

/* Branch to a delayed TT_UNALIGNED trap if (addr & mask) != 0. */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
875 
/* Advance pc to npc (delay-slot semantics), resolving symbolic values. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    /* Low bits set means a symbolic npc (DYNAMIC_PC/JUMP_PC/...). */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
899 
/*
 * Set up *cmp so that "c1 cmp->cond c2" evaluates the 4-bit integer
 * branch condition 'cond' against the current condition codes.
 * 'xcc' selects the 64-bit (xcc) vs 32-bit (icc) flag set.  Bit 3 of
 * 'cond' inverts the base condition selected by bits 2:0.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* 32-bit carry lives in bit 32 of cpu_icc_C on sparc64. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
998 
/*
 * Set up *cmp to evaluate the 4-bit FP branch condition 'cond'
 * against fcc register 'cc'.  Bit 3 of 'cond' inverts the base
 * condition selected by bits 2:0.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* both values with bit 0 set */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    /* Widen the i32 fcc value to a target-long comparison operand. */
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1058 
1059 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1060 {
1061     static const TCGCond cond_reg[4] = {
1062         TCG_COND_NEVER,  /* reserved */
1063         TCG_COND_EQ,
1064         TCG_COND_LE,
1065         TCG_COND_LT,
1066     };
1067     TCGCond tcond;
1068 
1069     if ((cond & 3) == 0) {
1070         return false;
1071     }
1072     tcond = cond_reg[cond & 3];
1073     if (cond & 4) {
1074         tcond = tcg_invert_cond(tcond);
1075     }
1076 
1077     cmp->cond = tcond;
1078     cmp->c1 = tcg_temp_new();
1079     cmp->c2 = 0;
1080     tcg_gen_mov_tl(cmp->c1, r_src);
1081     return true;
1082 }
1083 
1084 static void gen_op_clear_ieee_excp_and_FTT(void)
1085 {
1086     tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
1087                    offsetof(CPUSPARCState, fsr_cexc_ftt));
1088 }
1089 
/* Single-precision FP register move; clears pending cexc/ftt first. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1095 
1096 static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
1097 {
1098     gen_op_clear_ieee_excp_and_FTT();
1099     tcg_gen_xori_i32(dst, src, 1u << 31);
1100 }
1101 
1102 static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
1103 {
1104     gen_op_clear_ieee_excp_and_FTT();
1105     tcg_gen_andi_i32(dst, src, ~(1u << 31));
1106 }
1107 
/* Double-precision FP register move; clears pending cexc/ftt first. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1113 
1114 static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
1115 {
1116     gen_op_clear_ieee_excp_and_FTT();
1117     tcg_gen_xori_i64(dst, src, 1ull << 63);
1118 }
1119 
1120 static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
1121 {
1122     gen_op_clear_ieee_excp_and_FTT();
1123     tcg_gen_andi_i64(dst, src, ~(1ull << 63));
1124 }
1125 
1126 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1127 {
1128     TCGv_i64 l = tcg_temp_new_i64();
1129     TCGv_i64 h = tcg_temp_new_i64();
1130 
1131     tcg_gen_extr_i128_i64(l, h, src);
1132     tcg_gen_xori_i64(h, h, 1ull << 63);
1133     tcg_gen_concat_i64_i128(dst, l, h);
1134 }
1135 
1136 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1137 {
1138     TCGv_i64 l = tcg_temp_new_i64();
1139     TCGv_i64 h = tcg_temp_new_i64();
1140 
1141     tcg_gen_extr_i128_i64(l, h, src);
1142     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1143     tcg_gen_concat_i64_i128(dst, l, h);
1144 }
1145 
/* d = s1 * s2 + s3 (single), no negation flags. */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1150 
/* d = s1 * s2 + s3 (double), no negation flags. */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1155 
1156 static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1157 {
1158     int op = float_muladd_negate_c;
1159     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1160 }
1161 
1162 static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1163 {
1164     int op = float_muladd_negate_c;
1165     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1166 }
1167 
1168 static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1169 {
1170     int op = float_muladd_negate_c | float_muladd_negate_result;
1171     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1172 }
1173 
1174 static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1175 {
1176     int op = float_muladd_negate_c | float_muladd_negate_result;
1177     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1178 }
1179 
1180 static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1181 {
1182     int op = float_muladd_negate_result;
1183     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1184 }
1185 
1186 static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1187 {
1188     int op = float_muladd_negate_result;
1189     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1190 }
1191 
1192 static void gen_op_fpexception_im(DisasContext *dc, int ftt)
1193 {
1194     /*
1195      * CEXC is only set when succesfully completing an FPop,
1196      * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
1197      * Thus we can simply store FTT into this field.
1198      */
1199     tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
1200                    offsetof(CPUSPARCState, fsr_cexc_ftt));
1201     gen_exception(dc, TT_FP_EXCP);
1202 }
1203 
/*
 * If the FPU is disabled, raise an fp_disabled trap and return nonzero
 * so the caller stops translating the current insn.  For user-only
 * builds the check is compiled out and this always returns 0.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1214 
1215 /* asi moves */
/* Classification of an ASI access, selecting the codegen strategy. */
typedef enum {
    GET_ASI_HELPER,   /* generic out-of-line ld_asi/st_asi helper (default) */
    GET_ASI_EXCP,     /* exception already generated; emit nothing */
    GET_ASI_DIRECT,   /* plain qemu load/store with the resolved mem_idx */
    GET_ASI_DTWINX,   /* 128-bit twin register load/store */
    GET_ASI_CODE,     /* read through the instruction address space */
    GET_ASI_BLOCK,    /* 64-byte block transfer of FP registers */
    GET_ASI_SHORT,    /* 8/16-bit "short" FP load/store */
    GET_ASI_BCOPY,    /* sparc32 block copy (ASI_M_BCOPY) */
    GET_ASI_BFILL,    /* sparc32 block fill (ASI_M_BFILL) */
} ASIType;
1227 
/* A resolved ASI access: dispatch type plus parameters for codegen. */
typedef struct {
    ASIType type;
    int asi;        /* raw ASI number, passed to helpers */
    int mem_idx;    /* MMU index for direct loads/stores */
    MemOp memop;    /* access size and endianness */
} DisasASI;
1234 
1235 /*
1236  * Build DisasASI.
1237  * For asi == -1, treat as non-asi.
1238  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1239  */
1240 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1241 {
1242     ASIType type = GET_ASI_HELPER;
1243     int mem_idx = dc->mem_idx;
1244 
1245     if (asi == -1) {
1246         /* Artificial "non-asi" case. */
1247         type = GET_ASI_DIRECT;
1248         goto done;
1249     }
1250 
1251 #ifndef TARGET_SPARC64
1252     /* Before v9, all asis are immediate and privileged.  */
1253     if (asi < 0) {
1254         gen_exception(dc, TT_ILL_INSN);
1255         type = GET_ASI_EXCP;
1256     } else if (supervisor(dc)
1257                /* Note that LEON accepts ASI_USERDATA in user mode, for
1258                   use with CASA.  Also note that previous versions of
1259                   QEMU allowed (and old versions of gcc emitted) ASI_P
1260                   for LEON, which is incorrect.  */
1261                || (asi == ASI_USERDATA
1262                    && (dc->def->features & CPU_FEATURE_CASA))) {
1263         switch (asi) {
1264         case ASI_USERDATA:    /* User data access */
1265             mem_idx = MMU_USER_IDX;
1266             type = GET_ASI_DIRECT;
1267             break;
1268         case ASI_KERNELDATA:  /* Supervisor data access */
1269             mem_idx = MMU_KERNEL_IDX;
1270             type = GET_ASI_DIRECT;
1271             break;
1272         case ASI_USERTXT:     /* User text access */
1273             mem_idx = MMU_USER_IDX;
1274             type = GET_ASI_CODE;
1275             break;
1276         case ASI_KERNELTXT:   /* Supervisor text access */
1277             mem_idx = MMU_KERNEL_IDX;
1278             type = GET_ASI_CODE;
1279             break;
1280         case ASI_M_BYPASS:    /* MMU passthrough */
1281         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1282             mem_idx = MMU_PHYS_IDX;
1283             type = GET_ASI_DIRECT;
1284             break;
1285         case ASI_M_BCOPY: /* Block copy, sta access */
1286             mem_idx = MMU_KERNEL_IDX;
1287             type = GET_ASI_BCOPY;
1288             break;
1289         case ASI_M_BFILL: /* Block fill, stda access */
1290             mem_idx = MMU_KERNEL_IDX;
1291             type = GET_ASI_BFILL;
1292             break;
1293         }
1294 
1295         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1296          * permissions check in get_physical_address(..).
1297          */
1298         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1299     } else {
1300         gen_exception(dc, TT_PRIV_INSN);
1301         type = GET_ASI_EXCP;
1302     }
1303 #else
1304     if (asi < 0) {
1305         asi = dc->asi;
1306     }
1307     /* With v9, all asis below 0x80 are privileged.  */
1308     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1309        down that bit into DisasContext.  For the moment that's ok,
1310        since the direct implementations below doesn't have any ASIs
1311        in the restricted [0x30, 0x7f] range, and the check will be
1312        done properly in the helper.  */
1313     if (!supervisor(dc) && asi < 0x80) {
1314         gen_exception(dc, TT_PRIV_ACT);
1315         type = GET_ASI_EXCP;
1316     } else {
1317         switch (asi) {
1318         case ASI_REAL:      /* Bypass */
1319         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1320         case ASI_REAL_L:    /* Bypass LE */
1321         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1322         case ASI_TWINX_REAL:   /* Real address, twinx */
1323         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1324         case ASI_QUAD_LDD_PHYS:
1325         case ASI_QUAD_LDD_PHYS_L:
1326             mem_idx = MMU_PHYS_IDX;
1327             break;
1328         case ASI_N:  /* Nucleus */
1329         case ASI_NL: /* Nucleus LE */
1330         case ASI_TWINX_N:
1331         case ASI_TWINX_NL:
1332         case ASI_NUCLEUS_QUAD_LDD:
1333         case ASI_NUCLEUS_QUAD_LDD_L:
1334             if (hypervisor(dc)) {
1335                 mem_idx = MMU_PHYS_IDX;
1336             } else {
1337                 mem_idx = MMU_NUCLEUS_IDX;
1338             }
1339             break;
1340         case ASI_AIUP:  /* As if user primary */
1341         case ASI_AIUPL: /* As if user primary LE */
1342         case ASI_TWINX_AIUP:
1343         case ASI_TWINX_AIUP_L:
1344         case ASI_BLK_AIUP_4V:
1345         case ASI_BLK_AIUP_L_4V:
1346         case ASI_BLK_AIUP:
1347         case ASI_BLK_AIUPL:
1348             mem_idx = MMU_USER_IDX;
1349             break;
1350         case ASI_AIUS:  /* As if user secondary */
1351         case ASI_AIUSL: /* As if user secondary LE */
1352         case ASI_TWINX_AIUS:
1353         case ASI_TWINX_AIUS_L:
1354         case ASI_BLK_AIUS_4V:
1355         case ASI_BLK_AIUS_L_4V:
1356         case ASI_BLK_AIUS:
1357         case ASI_BLK_AIUSL:
1358             mem_idx = MMU_USER_SECONDARY_IDX;
1359             break;
1360         case ASI_S:  /* Secondary */
1361         case ASI_SL: /* Secondary LE */
1362         case ASI_TWINX_S:
1363         case ASI_TWINX_SL:
1364         case ASI_BLK_COMMIT_S:
1365         case ASI_BLK_S:
1366         case ASI_BLK_SL:
1367         case ASI_FL8_S:
1368         case ASI_FL8_SL:
1369         case ASI_FL16_S:
1370         case ASI_FL16_SL:
1371             if (mem_idx == MMU_USER_IDX) {
1372                 mem_idx = MMU_USER_SECONDARY_IDX;
1373             } else if (mem_idx == MMU_KERNEL_IDX) {
1374                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1375             }
1376             break;
1377         case ASI_P:  /* Primary */
1378         case ASI_PL: /* Primary LE */
1379         case ASI_TWINX_P:
1380         case ASI_TWINX_PL:
1381         case ASI_BLK_COMMIT_P:
1382         case ASI_BLK_P:
1383         case ASI_BLK_PL:
1384         case ASI_FL8_P:
1385         case ASI_FL8_PL:
1386         case ASI_FL16_P:
1387         case ASI_FL16_PL:
1388             break;
1389         }
1390         switch (asi) {
1391         case ASI_REAL:
1392         case ASI_REAL_IO:
1393         case ASI_REAL_L:
1394         case ASI_REAL_IO_L:
1395         case ASI_N:
1396         case ASI_NL:
1397         case ASI_AIUP:
1398         case ASI_AIUPL:
1399         case ASI_AIUS:
1400         case ASI_AIUSL:
1401         case ASI_S:
1402         case ASI_SL:
1403         case ASI_P:
1404         case ASI_PL:
1405             type = GET_ASI_DIRECT;
1406             break;
1407         case ASI_TWINX_REAL:
1408         case ASI_TWINX_REAL_L:
1409         case ASI_TWINX_N:
1410         case ASI_TWINX_NL:
1411         case ASI_TWINX_AIUP:
1412         case ASI_TWINX_AIUP_L:
1413         case ASI_TWINX_AIUS:
1414         case ASI_TWINX_AIUS_L:
1415         case ASI_TWINX_P:
1416         case ASI_TWINX_PL:
1417         case ASI_TWINX_S:
1418         case ASI_TWINX_SL:
1419         case ASI_QUAD_LDD_PHYS:
1420         case ASI_QUAD_LDD_PHYS_L:
1421         case ASI_NUCLEUS_QUAD_LDD:
1422         case ASI_NUCLEUS_QUAD_LDD_L:
1423             type = GET_ASI_DTWINX;
1424             break;
1425         case ASI_BLK_COMMIT_P:
1426         case ASI_BLK_COMMIT_S:
1427         case ASI_BLK_AIUP_4V:
1428         case ASI_BLK_AIUP_L_4V:
1429         case ASI_BLK_AIUP:
1430         case ASI_BLK_AIUPL:
1431         case ASI_BLK_AIUS_4V:
1432         case ASI_BLK_AIUS_L_4V:
1433         case ASI_BLK_AIUS:
1434         case ASI_BLK_AIUSL:
1435         case ASI_BLK_S:
1436         case ASI_BLK_SL:
1437         case ASI_BLK_P:
1438         case ASI_BLK_PL:
1439             type = GET_ASI_BLOCK;
1440             break;
1441         case ASI_FL8_S:
1442         case ASI_FL8_SL:
1443         case ASI_FL8_P:
1444         case ASI_FL8_PL:
1445             memop = MO_UB;
1446             type = GET_ASI_SHORT;
1447             break;
1448         case ASI_FL16_S:
1449         case ASI_FL16_SL:
1450         case ASI_FL16_P:
1451         case ASI_FL16_PL:
1452             memop = MO_TEUW;
1453             type = GET_ASI_SHORT;
1454             break;
1455         }
1456         /* The little-endian asis all have bit 3 set.  */
1457         if (asi & 8) {
1458             memop ^= MO_BSWAP;
1459         }
1460     }
1461 #endif
1462 
1463  done:
1464     return (DisasASI){ type, asi, mem_idx, memop };
1465 }
1466 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs to satisfy references from the generic helper paths below.
 * For 32-bit user-only, resolve_asi() classifies every ASI access as
 * DIRECT (LEON ASI_USERDATA with CASA) or EXCP, so these should be
 * unreachable at runtime.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1480 
/* Generate an integer load of DST from ADDR through the resolved ASI DA. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi.  */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* Fall back to the out-of-line helper, passing the raw ASI.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; narrow to the 32-bit target. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1526 
/* Generate an integer store of SRC to ADDR through the resolved ASI DA. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi.  */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte cache line. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            /* Copy as two 16-byte halves. */
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* Fall back to the out-of-line helper, passing the raw ASI.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1605 
1606 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1607                          TCGv dst, TCGv src, TCGv addr)
1608 {
1609     switch (da->type) {
1610     case GET_ASI_EXCP:
1611         break;
1612     case GET_ASI_DIRECT:
1613         tcg_gen_atomic_xchg_tl(dst, addr, src,
1614                                da->mem_idx, da->memop | MO_ALIGN);
1615         break;
1616     default:
1617         /* ??? Should be DAE_invalid_asi.  */
1618         gen_exception(dc, TT_DATA_ACCESS);
1619         break;
1620     }
1621 }
1622 
1623 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1624                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1625 {
1626     switch (da->type) {
1627     case GET_ASI_EXCP:
1628         return;
1629     case GET_ASI_DIRECT:
1630         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1631                                   da->mem_idx, da->memop | MO_ALIGN);
1632         break;
1633     default:
1634         /* ??? Should be DAE_invalid_asi.  */
1635         gen_exception(dc, TT_DATA_ACCESS);
1636         break;
1637     }
1638 }
1639 
/*
 * ldstub through ASI DA: atomically load the byte at ADDR into DST and
 * store 0xff in its place.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi.  */
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot perform helper-based load+store atomically;
               restart this insn outside of parallel execution.  */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1674 
/*
 * FP register load through ASI DA (ldfa/lddfa/ldqfa).
 * ORIG_SIZE is the size encoded by the insn (MO_32/MO_64/MO_128);
 * RD is the destination FP register number.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi.  */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Quad load implemented as two 8-byte loads (see TODO above). */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            /* 64-byte block: eight 8-byte loads into consecutive regs. */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            /* memop was narrowed to MO_UB/MO_TEUW by resolve_asi. */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
1795 
/*
 * FP register store through ASI DA (stfa/stdfa/stqfa).
 * ORIG_SIZE is the size encoded by the insn (MO_32/MO_64/MO_128);
 * RD is the source FP register number.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi.  */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.
               NOTE(review): memop already carries MO_ALIGN_4 here, so
               OR-ing MO_ALIGN_16 yields a different MO_AMASK encoding —
               confirm the intended alignment requirement.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* 64-byte block: eight 8-byte stores from consecutive regs. */
            for (int i = 0; ; ++i) {
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
1881 
/*
 * ldda through ASI DA: load a doubleword (or, for TWINX, a quadword)
 * into the register pair RD/RD+1.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi.  */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
1977 
/*
 * Generate code for a doubleword store via an alternate address space:
 * store the GPR pair rd / rd + 1 (64 bits, or 128 for DTWINX, or a
 * 32-byte block fill for BFILL) to ADDR using the ASI described by DA.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI decode already generated an exception; emit no store. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            /* Twin store: one 128-bit access, 16-byte alignment required. */
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to 16 bytes, then store twice. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            /* The fill targets the aligned 32-byte block containing ADDR. */
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            /* The helper may raise an exception; save state first. */
            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2068 
/*
 * Conditional move of a single-precision FP register: copy %f[rs] to
 * %f[rd] when the comparison in CMP holds.  sparc64 only.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? %f[rs] : %f[rd], i.e. unchanged when false. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2094 
/*
 * Conditional move of a double-precision FP register: copy %d[rs] to
 * %d[rd] when the comparison in CMP holds.  sparc64 only.
 */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = tcg_temp_new_i64();
    /* Select the source register when true, the old value when false. */
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2107 
/*
 * Conditional move of a quad-precision FP register, handled as two
 * 64-bit halves at rd and rd + 2.  sparc64 only.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    /* High half. */
    tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    /* Low half. */
    tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs + 2),
                        gen_load_fpr_D(dc, rd + 2));
    gen_store_fpr_D(dc, rd, h);
    gen_store_fpr_D(dc, rd + 2, l);
#else
    qemu_build_not_reached();
#endif
}
2127 
#ifdef TARGET_SPARC64
/* Compute R_TSPTR = &env->ts[env->tl & MAXTL_MASK], the current trap state. */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2151 
/*
 * Decode a double-precision FP register number from instruction field X.
 * The field's even bits select %d0..%d30; on sparc64 the low bit of the
 * field becomes bit 5 of the register number, extending to %d32..%d62.
 */
static int extract_dfpreg(DisasContext *dc, int x)
{
    int r = x & 0x1e;
#ifdef TARGET_SPARC64
    r |= (x & 1) << 5;
#endif
    return r;
}

/*
 * Decode a quad-precision FP register number from instruction field X.
 * As above, but quad registers are aligned to multiples of four.
 */
static int extract_qfpreg(DisasContext *dc, int x)
{
    int r = x & 0x1c;
#ifdef TARGET_SPARC64
    r |= (x & 1) << 5;
#endif
    return r;
}
2169 
2170 /* Include the auto-generated decoder.  */
2171 #include "decode-insns.c.inc"
2172 
/*
 * Glue a decoder pattern NAME to its implementation FUNC: the generated
 * trans_<NAME> checks the avail_<AVAIL> predicate, then calls FUNC.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates used by TRANS.  Features that cannot exist on
 * the compiled-in target are constant false, so the corresponding
 * translators can be folded away at compile time.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2205 
/*
 * Default case for non jump instructions: advance PC to NPC and NPC to
 * NPC + 4, handling the out-of-band NPC markers (values with the low
 * two bits set: DYNAMIC_PC, DYNAMIC_PC_LOOKUP, JUMP_PC).
 */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* NPC is only known at runtime; advance in generated code. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both PC and NPC are known at translation time. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2246 
2247 /*
2248  * Major opcodes 00 and 01 -- branches, call, and sethi
2249  */
2250 
/*
 * Emit a conditional branch to dc->pc + DISP * 4, with delay-slot
 * annulment when ANNUL is set.  CMP holds the evaluated condition.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        /* Branch always: if annulling, skip the delay slot entirely. */
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        /* Branch never: fall through, skipping the delay slot if annul. */
        npc = dc->npc;
        if (npc & 3) {
            /* NPC only known at runtime; adjust in generated code. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /*
         * Annulling conditional branch: the delay slot executes only on
         * a taken branch, so both outcomes end the TB here.
         */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Select the branch target into cpu_npc at runtime. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the branch decision past the delay slot (JUMP_PC). */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2330 
/* Raise a privileged-instruction exception; always "handles" the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise an unimplemented-FPop exception; always "handles" the insn. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2342 
2343 static bool gen_trap_float128(DisasContext *dc)
2344 {
2345     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2346         return false;
2347     }
2348     return raise_unimpfpop(dc);
2349 }
2350 
/* Branch on integer condition codes (Bicc on v8, BPcc on v9). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2361 
/* Branch on floating-point condition codes (FBfcc / FBPfcc). */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* With the FPU unavailable, gen_trap_ifnofpu emits the trap instead. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2375 
2376 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2377 {
2378     DisasCompare cmp;
2379 
2380     if (!avail_64(dc)) {
2381         return false;
2382     }
2383     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2384         return false;
2385     }
2386     return advance_jump_cond(dc, &cmp, a->a, a->i);
2387 }
2388 
2389 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2390 {
2391     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2392 
2393     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2394     gen_mov_pc_npc(dc);
2395     dc->npc = target;
2396     return true;
2397 }
2398 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false lets the decoder fall through to illegal insn. */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2412 
2413 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2414 {
2415     /* Special-case %g0 because that's the canonical nop.  */
2416     if (a->rd) {
2417         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2418     }
2419     return advance_pc(dc);
2420 }
2421 
2422 /*
2423  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2424  */
2425 
/*
 * Tcc: trap on condition COND of condition-code set CC.  The trap
 * number is (rs1 + rs2-or-imm) masked to the cpu's allowed range
 * (UA2005 hypervisor traps permit more bits than V8), plus TT_TRAP.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* trap = ((rs1 + rs2_or_imm) & mask) + TT_TRAP, at runtime. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Emit the exception out of line; branch to it when COND holds. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2478 
/* Tcc with a register second operand. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    /* The cc field only exists on v9. */
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Tcc with an immediate, v7/v8 encoding (no cc field). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc with an immediate, v9 encoding (with cc field). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2502 
/* STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

/* MEMBAR (v9 only): barrier with separate ordering and completion masks. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2524 
2525 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2526                           TCGv (*func)(DisasContext *, TCGv))
2527 {
2528     if (!priv) {
2529         return raise_priv(dc);
2530     }
2531     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2532     return advance_pc(dc);
2533 }
2534 
/* RDY: %y lives in a dedicated TCG global; return it directly. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2552 
/* Leon3 %asr17 (processor configuration register), via helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* v9 %ccr, assembled from cpu state by helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* v9 %asi: known at translation time, so emit a constant. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2579 
/* v9 %tick: read the tick timer via helper (may perform I/O). */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Timer access counts as I/O; may force the TB to end. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* v9 %pc: known at translation time, so emit a constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

/* v9 %fprs, widened from the 32-bit TCG global. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* v9 %gsr; traps instead if the FPU is unavailable. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2618 
/* v9 %softint, sign-extended from its 32-bit env field. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* v9 %tick_cmpr. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* v9 %stick: like %tick but reads the system tick timer. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* Timer access counts as I/O; may force the TB to end. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* v9 %stick_cmpr. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2660 
/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Constant 1: single strand, always "running". */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

/* v8 %psr, assembled from cpu state by helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2680 
/* Hyperprivileged %hpstate. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* Hyperprivileged %htstate: an array indexed by (tl & MAXTL_MASK). */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    /* Each htstate entry is 8 bytes. */
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* Hyperprivileged %hintp. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* Hyperprivileged %htba (hypervisor trap base address). */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* Hyperprivileged %hver. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* Hyperprivileged %hstick_cmpr. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

/* v8 %wim (window invalid mask). */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2746 
/* v9 %tpc, read from the trap state for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* v9 %tnpc, from the current trap state. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* v9 %tstate, from the current trap state. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* v9 %tt (trap type), from the current trap state. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

/* %tbr (v8) / %tba (v9) live in a dedicated TCG global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2815 
/* v9 %pstate. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* v9 %tl (trap level). */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* v9 %pil (processor interrupt level). */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* v9 %cwp (current window pointer), via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* v9 %cansave. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* v9 %canrestore. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* v9 %cleanwin. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* v9 %otherwin. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* v9 %wstate. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* UA2005 %gl (global level). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* v9 %ver (version register). */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
2913 
2914 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
2915 {
2916     if (avail_64(dc)) {
2917         gen_helper_flushw(tcg_env);
2918         return advance_pc(dc);
2919     }
2920     return false;
2921 }
2922 
/*
 * Common path for writing a special register: compute the source value
 * rs1 XOR (rs2 or immediate), then hand it to FUNC.  When PRIV is
 * false a privileged-instruction exception is raised instead.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* Fold the value to a constant when both operands are known. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* XOR with zero: pass rs1 through unchanged. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
2954 
/* WRY: %y holds only 32 significant bits; zero-extend on write. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* WRCCR: unpack the v9 condition-code register via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* WRASI: store the low 8 bits as the new default ASI. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* WRFPRS: changing FPRS affects FP enable state, so end the TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* WRGSR: graphics status register; traps if the FPU is disabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* WRSOFTINT_SET: OR bits into the soft interrupt register. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* WRSOFTINT_CLR: clear bits in the soft interrupt register. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* WRSOFTINT: replace the soft interrupt register wholesale. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3022 
/* WRTICK_CMPR: store the compare value and reprogram the %tick timer. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3036 
3037 static void do_wrstick(DisasContext *dc, TCGv src)
3038 {
3039 #ifdef TARGET_SPARC64
3040     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3041 
3042     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3043     translator_io_start(&dc->base);
3044     gen_helper_tick_set_count(r_tickptr, src);
3045     /* End TB to handle timer interrupt */
3046     dc->base.is_jmp = DISAS_EXIT;
3047 #else
3048     qemu_build_not_reached();
3049 #endif
3050 }
3051 
3052 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3053 
/* WRSTICK_CMPR: store the compare value and reprogram the %stick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3067 
/*
 * WRPOWERDOWN: halt the cpu.  The helper does not return to the TB,
 * so pc/npc must be written back before the call.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3076 
/* WRPSR (v8): helper validates and unpacks PSR; exit TB for mode change. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3084 
/* WRWIM (v8): only bits for implemented windows are writable. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3095 
/* WRPR %tpc: store into the trap-state entry for the current TL. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* WRPR %tnpc: store into the trap-state entry for the current TL. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* WRPR %tstate: store into the trap-state entry for the current TL. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* WRPR %tt: trap type is a 32-bit field within trap_state. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3151 
/* WRPR %tick: reset the tick counter to the written value. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3164 
/* WRPR %tba: trap base address (also reused by v8 WRTBR below). */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3171 
/*
 * WRPR %pstate: state is saved first so the helper may raise a trap;
 * npc becomes dynamic since the privilege/trap context may change.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3183 
/* WRPR %tl: trap level changes the visible trap state; npc goes dynamic. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3192 
/* WRPR %pil: interrupt level mask; may unmask a pending interrupt. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3202 
/* WRPR %cwp: helper handles the register-window switch. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3209 
/* WRPR %cansave: windows available for SAVE without spilling. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* WRPR %canrestore: windows available for RESTORE without filling. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* WRPR %cleanwin: count of windows guaranteed clean. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* WRPR %otherwin: windows reserved for the other address space. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* WRPR %wstate: spill/fill trap vector selector. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* WRPR %gl: helper switches the active global register set. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* v8 WRTBR shares the implementation of the v9 %tba write. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3261 
/* WRHPR %hpstate: hypervisor state change requires a TB exit. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3269 
/*
 * WRHPR %htstate: store into htstate[TL].  The index is computed at
 * runtime: TL is masked to the implemented range and scaled by 8
 * (shift by 3), the entry size, to form an offset from tcg_env.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3285 
/* WRHPR %hintp: hypervisor interrupt pending register. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* WRHPR %htba: hypervisor trap base address. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3299 
/* WRHPR %hstick_cmpr: store the compare value and reprogram %hstick. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3314 
/*
 * SAVED / RESTORED (v9): privileged window-management hints that
 * adjust the cansave/canrestore/otherwin bookkeeping via helpers.
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3330 
/* NOP: nothing to emit; just step pc/npc forward. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3342 
/*
 * Common translation for two-operand arithmetic/logical insns.
 * FUNC handles the register form, FUNCI (may be NULL) the immediate
 * form.  When LOGIC_CC is set, the result is computed directly into
 * cpu_cc_N and the remaining flags are derived from it (Z mirrors N,
 * C and V are cleared), matching the flags of a logical operation.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Keep the 32-bit (icc) view of the flags in sync too. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3385 
3386 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3387                      void (*func)(TCGv, TCGv, TCGv),
3388                      void (*funci)(TCGv, TCGv, target_long),
3389                      void (*func_cc)(TCGv, TCGv, TCGv))
3390 {
3391     if (a->cc) {
3392         return do_arith_int(dc, a, func_cc, NULL, false);
3393     }
3394     return do_arith_int(dc, a, func, funci, false);
3395 }
3396 
/* Dispatch a logical insn; the cc bit selects logic-style flag setting. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3430 
3431 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3432 {
3433     /* OR with %g0 is the canonical alias for MOV. */
3434     if (!a->cc && a->rs1 == 0) {
3435         if (a->imm || a->rs2_or_imm == 0) {
3436             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3437         } else if (a->rs2_or_imm & ~0x1f) {
3438             /* For simplicity, we under-decoded the rs2 form. */
3439             return false;
3440         } else {
3441             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3442         }
3443         return advance_pc(dc);
3444     }
3445     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3446 }
3447 
/*
 * UDIV (v8): unsigned 64/32 divide.  The dividend is %y:rs1
 * (y in the high half), the quotient saturates at UINT32_MAX.
 * A zero register divisor is caught at runtime with a delayed
 * division-by-zero exception.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    t1 = tcg_temp_new_i64();
    /* t1 = (y << 32) | rs1 -- rs1 is the low word of the dividend. */
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate an over-wide quotient to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3500 
/*
 * UDIVX (v9): unsigned 64-bit divide.  Constant-zero divisors trap
 * immediately; register divisors are checked at runtime with a
 * delayed division-by-zero exception.
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3538 
/*
 * SDIVX (v9): signed 64-bit divide.  Besides the runtime zero-divisor
 * check, the INT64_MIN / -1 case (which would trap on some hosts) is
 * special-cased: for the immediate form it folds into a negation, for
 * the register form the divisor is patched to 1 at runtime.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x; covers INT64_MIN without a host trap. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3595 
/*
 * VIS EDGE* instructions: compute a partial-store byte mask for the
 * edges of a WIDTH-bit-element region from address rs1 (left edge)
 * to rs2 (right edge).  CC variants also set the integer flags from
 * rs1 - rs2; LITTLE_ENDIAN selects the mirrored mask orientation.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /* Extract the element index within the 8-byte word from each address. */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l & r : l) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3674 
3675 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3676                    void (*func)(TCGv, TCGv, TCGv))
3677 {
3678     TCGv dst = gen_dest_gpr(dc, a->rd);
3679     TCGv src1 = gen_load_gpr(dc, a->rs1);
3680     TCGv src2 = gen_load_gpr(dc, a->rs2);
3681 
3682     func(dst, src1, src2);
3683     gen_store_gpr(dc, a->rd, dst);
3684     return advance_pc(dc);
3685 }
3686 
3687 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3688 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3689 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3690 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the low 3 bits of the sum are
 * latched into GSR.align for use by FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/*
 * ALIGNADDRESS_LITTLE: as above, but GSR.align receives the negated
 * low bits of the sum.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3720 
/* BMASK (VIS2): dst = s1 + s2, with the sum also latched into GSR.mask. */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3732 
/*
 * Register-count shifts.  L selects shift-left, otherwise U selects
 * logical (vs arithmetic) shift-right.  The count is masked to 31,
 * or to 63 for the 64-bit (a->x) forms; 32-bit forms additionally
 * extend the operand/result to keep only 32 significant bits.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3772 
/*
 * Immediate-count shifts.  On sparc64, the 32-bit (!a->x) forms fold
 * the shift and the 32-bit extension into a single deposit/extract;
 * this requires a->i < 32, which the decode guarantees there.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3809 
3810 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3811 {
3812     /* For simplicity, we under-decoded the rs2 form. */
3813     if (!imm && rs2_or_imm & ~0x1f) {
3814         return NULL;
3815     }
3816     if (imm || rs2_or_imm == 0) {
3817         return tcg_constant_tl(rs2_or_imm);
3818     } else {
3819         return cpu_regs[rs2_or_imm];
3820     }
3821 }
3822 
/*
 * Conditional move: rd = CMP ? src2 : rd.  The current rd value is
 * loaded first so the movcond can preserve it on a false condition.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
3832 
/* MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVR: conditional move on the contents of register rs1. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
3870 
/*
 * Common translation for insns whose operand is rs1 + rs2-or-imm
 * (JMPL, RETT, RETURN, SAVE, RESTORE): compute the sum and pass it,
 * with the destination register number, to FUNC.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
3895 
/* JMPL: jump to SRC, storing the address of the jmpl itself into rd. */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
3916 
/* RETT (v8): privileged return from trap to the target address. */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
3934 
/* RETURN (v9): restore the caller's window and jump to SRC. */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
3949 
/*
 * SAVE: advance to a new register window, then write the sum
 * (computed in the *old* window by do_add_special) into RD of the
 * *new* window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
3958 
/*
 * RESTORE: return to the previous register window, then write the sum
 * (computed in the old window) into RD of the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
3967 
/*
 * DONE/RETRY (sparc64 only): return from a trap handler.  Privileged.
 * Both pc and npc are reloaded from trap state by the helper, so the
 * translator marks them dynamic and ends the TB.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* The helpers touch timing state; see translator_io_start. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
3986 
3987 /*
3988  * Major opcode 11 -- load and store instructions
3989  */
3990 
/*
 * Compute the effective address rs1 + (simm13 | rs2) for a load/store.
 * Returns NULL for an illegal encoding (register form with out-of-range
 * rs2 bits).  In 32-bit addressing mode (AM_CHECK) the result is
 * zero-extended to 32 bits.  A single temporary is reused for both the
 * addition and the masking step.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* 32-bit addressing: truncate the address. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4019 
/*
 * Integer load of size/sign MOP into RD, through the (possibly
 * implicit) ASI.  Returns false to reject an illegal encoding.
 */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4043 
/*
 * Integer store of RD with size MOP, through the (possibly implicit)
 * ASI.  Returns false to reject an illegal encoding.
 */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4063 
/*
 * LDD: load doubleword into the even/odd register pair rd/rd+1.
 * An odd rd is an illegal encoding (rejected with false).
 */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4080 
/*
 * STD: store the even/odd register pair rd/rd+1 as a doubleword.
 * An odd rd is an illegal encoding (rejected with false).
 */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4097 
/*
 * LDSTUB: atomic load-store-unsigned-byte; rd receives the old byte,
 * memory is set to 0xff (done inside gen_ldstub_asi).
 */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4114 
/*
 * SWAP: atomically exchange the 32-bit word in rd with memory.
 * src is loaded before the swap so rd's old value goes to memory.
 */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4132 
/*
 * CASA/CASXA: compare-and-swap.  The address is [rs1] with no offset
 * (hence the hard-coded imm=true, 0); rs2 holds the comparison value,
 * rd supplies the new value and receives the old memory contents.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4154 
/*
 * FP load of size SZ (32/64/128 bits) into float register RD through
 * an ASI.  Traps if the FPU is disabled, or for a 128-bit access when
 * float128 is not implemented.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4182 
/*
 * FP store of size SZ (32/64/128 bits) from float register RD through
 * an ASI.  Same trap checks as do_ld_fpr.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4209 
/*
 * STDFQ (sparc32 only, privileged): store the floating-point deferred
 * trap queue.  The FQ is not modelled here, so this raises a
 * sequence-error FP exception instead of performing a store.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4224 
/*
 * LDFSR: load the 32-bit FSR from memory.  fcc0 is extracted into its
 * split-out TCG global; the remaining FSR fields (except fcc and ftt)
 * are updated by the helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4246 
/*
 * LDXFSR (sparc64 only): load the 64-bit FSR.  The low 32 bits feed
 * the nofcc/noftt helper plus fcc0; fcc1-3 live in the high half.
 * Note cpu_fcc[3] doubles as the scratch for the high word: the final
 * extract rewrites it in place with just its own 2-bit field.
 */
static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    return advance_pc(dc);
#else
    return false;
#endif
}
4278 
/*
 * STFSR/STXFSR: assemble the full FSR value via the helper and store
 * it as a 32-bit (STFSR) or 64-bit (STXFSR) quantity.
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4299 
4300 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4301 {
4302     if (gen_trap_ifnofpu(dc)) {
4303         return true;
4304     }
4305     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4306     return advance_pc(dc);
4307 }
4308 
4309 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4310 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4311 
4312 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4313 {
4314     if (gen_trap_ifnofpu(dc)) {
4315         return true;
4316     }
4317     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4318     return advance_pc(dc);
4319 }
4320 
4321 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4322 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4323 
/* Unary 32-bit FP op with no env access: frd = func(frs). */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    /* In-place update of the loaded temp is fine: func(tmp, tmp). */
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4344 
/* Narrowing FP op with no env access: frd(32) = func(frs(64)). */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4364 
/* Unary 32-bit FP op that may raise FP exceptions: frd = func(env, frs). */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4383 
/* Narrowing FP conversion via env helper: frd(32) = func(env, frs(64)). */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4404 
/* Unary 64-bit FP op with no env access: frd = func(frs). */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4426 
/* Unary 64-bit FP op via env helper: frd = func(env, frs). */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4446 
/* Widening FP op with no env access: frd(64) = func(frs(32)). */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4465 
/* Widening FP conversion via env helper: frd(64) = func(env, frs(32)). */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4486 
/*
 * Unary 128-bit FP op with no env access: qrd = func(qrs).
 * These cannot raise IEEE exceptions, so pending excp/FTT state is
 * cleared here rather than in a helper.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4509 
/* Unary 128-bit FP op via env helper: qrd = func(env, qrs). */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4529 
/* Narrowing conversion via env helper: frd(32) = func(env, qrs(128)). */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4552 
/* Narrowing conversion via env helper: frd(64) = func(env, qrs(128)). */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4575 
/* Widening conversion via env helper: qrd(128) = func(env, frs(32)). */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4598 
/* Widening conversion via env helper: qrd(128) = func(env, frs(64)). */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4621 
/* Binary 32-bit FP op with no env access: frd = func(frs1, frs2). */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    /* src1 is a temp, safe to reuse as the destination. */
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4650 
/* Binary 32-bit FP op via env helper: frd = func(env, frs1, frs2). */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4671 
/* VIS op with two 32-bit sources and a 64-bit result: frd(64) = func(frs1, frs2). */
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4695 
/* VIS op with 32-bit and 64-bit sources: frd(64) = func(frs1(32), frs2(64)). */
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4715 
/*
 * VIS partitioned arithmetic via a gvec expander, operating on 8-byte
 * double registers by env offset (oprsz = maxsz = 8).
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
4733 
/* Binary 64-bit FP/VIS op with no env access: frd = func(frs1, frs2). */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4766 
/*
 * VIS compare: two 64-bit FP sources, result written to the *integer*
 * register RD (not a float register).
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4794 
/* Binary 64-bit FP op via env helper: frd = func(env, frs1, frs2). */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4816 
/*
 * FsMULd: single-precision multiply with double-precision result.
 * Gated on CPU_FEATURE_FSMULD; unimplemented-fpop if absent.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
4836 
/* Fused 32-bit multiply-add family: frd = func(frs1, frs2, frs3). */
static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    src3 = gen_load_fpr_F(dc, a->rs3);
    dst = tcg_temp_new_i32();
    func(dst, src1, src2, src3);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
4859 
/* 64-bit three-source ops (PDIST, fused multiply-add family). */
static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
4883 
/* Binary 128-bit FP op via env helper: qrd = func(env, qrs1, qrs2). */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4907 
/* FdMULq: double-precision multiply with quad-precision result. */
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}
4927 
/*
 * FMOVR: conditional FP move on an integer register condition.
 * gen_compare_reg returns false for an invalid cond encoding.
 * FUNC performs the actual conditional register move.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
4951 
/* FMOVcc: conditional FP move on an integer condition code. */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
4973 
/* FMOVfcc: conditional FP move on a floating-point condition code. */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
4995 
4996 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
4997 {
4998     TCGv_i32 src1, src2;
4999 
5000     if (avail_32(dc) && a->cc != 0) {
5001         return false;
5002     }
5003     if (gen_trap_ifnofpu(dc)) {
5004         return true;
5005     }
5006 
5007     src1 = gen_load_fpr_F(dc, a->rs1);
5008     src2 = gen_load_fpr_F(dc, a->rs2);
5009     if (e) {
5010         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5011     } else {
5012         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5013     }
5014     return advance_pc(dc);
5015 }
5016 
5017 TRANS(FCMPs, ALL, do_fcmps, a, false)
5018 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5019 
5020 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5021 {
5022     TCGv_i64 src1, src2;
5023 
5024     if (avail_32(dc) && a->cc != 0) {
5025         return false;
5026     }
5027     if (gen_trap_ifnofpu(dc)) {
5028         return true;
5029     }
5030 
5031     src1 = gen_load_fpr_D(dc, a->rs1);
5032     src2 = gen_load_fpr_D(dc, a->rs2);
5033     if (e) {
5034         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5035     } else {
5036         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5037     }
5038     return advance_pc(dc);
5039 }
5040 
5041 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5042 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5043 
5044 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5045 {
5046     TCGv_i128 src1, src2;
5047 
5048     if (avail_32(dc) && a->cc != 0) {
5049         return false;
5050     }
5051     if (gen_trap_ifnofpu(dc)) {
5052         return true;
5053     }
5054     if (gen_trap_float128(dc)) {
5055         return true;
5056     }
5057 
5058     src1 = gen_load_fpr_Q(dc, a->rs1);
5059     src2 = gen_load_fpr_Q(dc, a->rs2);
5060     if (e) {
5061         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5062     } else {
5063         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5064     }
5065     return advance_pc(dc);
5066 }
5067 
5068 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5069 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5070 
/* Initialize the per-TB translation state from the TB's flags and cs_base. */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The npc of the first insn is passed to the TB through cs_base. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    /* -(pc | PAGE_MASK) is the byte count to the end of the page; /4 insns. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5099 
/* No per-TB prologue is needed for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5103 
/*
 * Record (pc, npc) for this insn so sparc_restore_state_to_opc() can
 * recover them at exception time.  Symbolic npc values (low two bits
 * set) are canonicalized before being stored.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Record the taken target; restore chooses it vs pc+4 by cond. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic forms restore the same way; fold them together. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5125 
/* Fetch, decode and translate a single 4-byte instruction. */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    /* Any opcode the decoder rejects raises an illegal-instruction trap. */
    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /*
     * If dc->pc no longer tracks the sequential pc_next, the insn did
     * not simply fall through (or pc became symbolic); end the TB.
     */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5145 
5146 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5147 {
5148     DisasContext *dc = container_of(dcbase, DisasContext, base);
5149     DisasDelayException *e, *e_next;
5150     bool may_lookup;
5151 
5152     finishing_insn(dc);
5153 
5154     switch (dc->base.is_jmp) {
5155     case DISAS_NEXT:
5156     case DISAS_TOO_MANY:
5157         if (((dc->pc | dc->npc) & 3) == 0) {
5158             /* static PC and NPC: we can use direct chaining */
5159             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5160             break;
5161         }
5162 
5163         may_lookup = true;
5164         if (dc->pc & 3) {
5165             switch (dc->pc) {
5166             case DYNAMIC_PC_LOOKUP:
5167                 break;
5168             case DYNAMIC_PC:
5169                 may_lookup = false;
5170                 break;
5171             default:
5172                 g_assert_not_reached();
5173             }
5174         } else {
5175             tcg_gen_movi_tl(cpu_pc, dc->pc);
5176         }
5177 
5178         if (dc->npc & 3) {
5179             switch (dc->npc) {
5180             case JUMP_PC:
5181                 gen_generic_branch(dc);
5182                 break;
5183             case DYNAMIC_PC:
5184                 may_lookup = false;
5185                 break;
5186             case DYNAMIC_PC_LOOKUP:
5187                 break;
5188             default:
5189                 g_assert_not_reached();
5190             }
5191         } else {
5192             tcg_gen_movi_tl(cpu_npc, dc->npc);
5193         }
5194         if (may_lookup) {
5195             tcg_gen_lookup_and_goto_ptr();
5196         } else {
5197             tcg_gen_exit_tb(NULL, 0);
5198         }
5199         break;
5200 
5201     case DISAS_NORETURN:
5202        break;
5203 
5204     case DISAS_EXIT:
5205         /* Exit TB */
5206         save_state(dc);
5207         tcg_gen_exit_tb(NULL, 0);
5208         break;
5209 
5210     default:
5211         g_assert_not_reached();
5212     }
5213 
5214     for (e = dc->delay_excp_list; e ; e = e_next) {
5215         gen_set_label(e->lab);
5216 
5217         tcg_gen_movi_tl(cpu_pc, e->pc);
5218         if (e->npc % 4 == 0) {
5219             tcg_gen_movi_tl(cpu_npc, e->npc);
5220         }
5221         gen_helper_raise_exception(tcg_env, e->excp);
5222 
5223         e_next = e->next;
5224         g_free(e);
5225     }
5226 }
5227 
/* Hooks consumed by the generic translator_loop(). */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5235 
5236 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5237                            vaddr pc, void *host_pc)
5238 {
5239     DisasContext dc = {};
5240 
5241     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5242 }
5243 
/*
 * One-time TCG setup: register the TCG globals that mirror the
 * CPUSPARCState fields, plus the window-relative %o/%l/%i registers
 * accessed indirectly through regwptr.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: FPRS and the FP condition codes (one fcc pre-v9). */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: condition-code pieces and control regs. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is hardwired to zero; it has no backing storage. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* %o/%l/%i live in the current window, reached via regwptr. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5309 
/*
 * Recover pc/npc at exception time from the per-insn data recorded by
 * sparc_tr_insn_start(): data[0] = pc, data[1] = (possibly symbolic) npc.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;   /* taken: the recorded branch target */
        } else {
            env->npc = pc + 4;     /* not taken: sequential next insn */
        }
    } else {
        env->npc = npc;
    }
}
5332