xref: /openbmc/qemu/target/sparc/translate.c (revision db11dfea83e35c534fef8a86603b72354be9d71b)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
93 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
94 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
95 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
96 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
97 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
98 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
99 # define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
100 # define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
101 # define MAXTL_MASK                             0
102 #endif
103 
104 /* Dynamic PC, must exit to main loop. */
105 #define DYNAMIC_PC         1
106 /* Dynamic PC, one of two values according to jump_pc[T2]. */
107 #define JUMP_PC            2
108 /* Dynamic PC, may lookup next TB. */
109 #define DYNAMIC_PC_LOOKUP  3
110 
111 #define DISAS_EXIT  DISAS_TARGET_0
112 
113 /* global register indexes */
114 static TCGv_ptr cpu_regwptr;
115 static TCGv cpu_pc, cpu_npc;
116 static TCGv cpu_regs[32];
117 static TCGv cpu_y;
118 static TCGv cpu_tbr;
119 static TCGv cpu_cond;
120 static TCGv cpu_cc_N;
121 static TCGv cpu_cc_V;
122 static TCGv cpu_icc_Z;
123 static TCGv cpu_icc_C;
124 #ifdef TARGET_SPARC64
125 static TCGv cpu_xcc_Z;
126 static TCGv cpu_xcc_C;
127 static TCGv_i32 cpu_fprs;
128 static TCGv cpu_gsr;
129 #else
130 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
131 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
132 #endif
133 
134 #ifdef TARGET_SPARC64
135 #define cpu_cc_Z  cpu_xcc_Z
136 #define cpu_cc_C  cpu_xcc_C
137 #else
138 #define cpu_cc_Z  cpu_icc_Z
139 #define cpu_cc_C  cpu_icc_C
140 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
141 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
142 #endif
143 
144 /* Floating point comparison registers */
145 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
146 
147 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
148 #ifdef TARGET_SPARC64
149 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
150 # define env64_field_offsetof(X)  env_field_offsetof(X)
151 #else
152 # define env32_field_offsetof(X)  env_field_offsetof(X)
153 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
154 #endif
155 
156 typedef struct DisasCompare {
157     TCGCond cond;
158     TCGv c1;
159     int c2;
160 } DisasCompare;
161 
162 typedef struct DisasDelayException {
163     struct DisasDelayException *next;
164     TCGLabel *lab;
165     TCGv_i32 excp;
166     /* Saved state at parent insn. */
167     target_ulong pc;
168     target_ulong npc;
169 } DisasDelayException;
170 
171 typedef struct DisasContext {
172     DisasContextBase base;
173     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
174     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
175 
176     /* Used when JUMP_PC value is used. */
177     DisasCompare jump;
178     target_ulong jump_pc[2];
179 
180     int mem_idx;
181     bool cpu_cond_live;
182     bool fpu_enabled;
183     bool address_mask_32bit;
184 #ifndef CONFIG_USER_ONLY
185     bool supervisor;
186 #ifdef TARGET_SPARC64
187     bool hypervisor;
188 #endif
189 #endif
190 
191     sparc_def_t *def;
192 #ifdef TARGET_SPARC64
193     int fprs_dirty;
194     int asi;
195 #endif
196     DisasDelayException *delay_excp_list;
197 } DisasContext;
198 
199 // This function uses non-native bit order
200 #define GET_FIELD(X, FROM, TO)                                  \
201     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
202 
203 // This function uses the order in the manuals, i.e. bit 0 is 2^0
204 #define GET_FIELD_SP(X, FROM, TO)               \
205     GET_FIELD(X, 31 - (TO), 31 - (FROM))
206 
207 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
208 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
209 
210 #define UA2005_HTRAP_MASK 0xff
211 #define V8_TRAP_MASK 0x7f
212 
213 #define IS_IMM (insn & (1<<13))
214 
/*
 * Mark the half of the FP register file containing double-register RD
 * as dirty in FPRS.  SPARC64 only; compiles to a no-op for sparc32.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    /* FPRS.DL (bit 0) covers %f0-%f31, FPRS.DU (bit 1) covers %f32-%f62. */
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
227 
228 /* floating point registers moves */
229 
230 static int gen_offset_fpr_F(unsigned int reg)
231 {
232     int ret;
233 
234     tcg_debug_assert(reg < 32);
235     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
236     if (reg & 1) {
237         ret += offsetof(CPU_DoubleU, l.lower);
238     } else {
239         ret += offsetof(CPU_DoubleU, l.upper);
240     }
241     return ret;
242 }
243 
/* Load single-precision FP register SRC into a fresh 32-bit temporary. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}
250 
/* Store V into single-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}
256 
/*
 * Return the byte offset within CPUSPARCState of double-precision
 * FP register REG.  REG is the even single-register number (0..62).
 */
static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}
263 
/* Load double-precision FP register SRC into a fresh 64-bit temporary. */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}
270 
/* Store V into double-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}
276 
/*
 * Load quad-precision FP register SRC as an i128.
 * The lower-numbered double pair holds the most significant half.
 */
static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}
286 
/*
 * Store the i128 value V into quad-precision FP register DST,
 * high half into the lower-numbered double pair.
 */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
296 
297 /* moves */
298 #ifdef CONFIG_USER_ONLY
299 #define supervisor(dc) 0
300 #define hypervisor(dc) 0
301 #else
302 #ifdef TARGET_SPARC64
303 #define hypervisor(dc) (dc->hypervisor)
304 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
305 #else
306 #define supervisor(dc) (dc->supervisor)
307 #define hypervisor(dc) 0
308 #endif
309 #endif
310 
311 #if !defined(TARGET_SPARC64)
312 # define AM_CHECK(dc)  false
313 #elif defined(TARGET_ABI32)
314 # define AM_CHECK(dc)  true
315 #elif defined(CONFIG_USER_ONLY)
316 # define AM_CHECK(dc)  false
317 #else
318 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
319 #endif
320 
/* Truncate ADDR to 32 bits in place when the 32-bit address mask applies. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
327 
328 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
329 {
330     return AM_CHECK(dc) ? (uint32_t)addr : addr;
331 }
332 
/*
 * Return a TCGv holding general register REG.
 * %g0 always reads as zero; return a fresh zeroed temporary for it
 * rather than exposing a writable global.
 */
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}
344 
/* Store V into general register REG; writes to %g0 are discarded. */
static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
352 
353 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
354 {
355     if (reg > 0) {
356         assert(reg < 32);
357         return cpu_regs[reg];
358     } else {
359         return tcg_temp_new();
360     }
361 }
362 
/* A direct TB link requires both PC and NPC to be valid goto_tb targets. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
368 
/*
 * End the TB, transferring control to (PC, NPC): either chain directly
 * to the next TB via slot TB_NUM, or fall back to a TB lookup.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
385 
/*
 * Return icc.C as a 0/1 value.  On a 64-bit target the 32-bit carry
 * is stored in bit 32 of cpu_icc_C and must be extracted; on a 32-bit
 * target cpu_icc_C already holds 0 or 1.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
395 
/*
 * Emit dst = src1 + src2 (+ cin, which may be NULL), computing the
 * condition codes:
 *   N holds the result; C is the carry-out from the widening add;
 *   V = (N ^ src2) & ~(src1 ^ src2)  (signed overflow);
 *   the Z register holds the result itself (Z flag tested as cc_Z == 0).
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        /* Two widening adds so the carry-in is folded into cc_C. */
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* cc_Z temporarily holds src1 ^ src2 during the V computation. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
420 
/* ADDcc: add setting condition codes, no carry-in. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
425 
/*
 * TADDcc: tagged add.  In addition to normal overflow, icc.V is set
 * if either operand has nonzero tag bits (the low two bits).
 */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V: nonzero tags force all-ones. */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
441 
/* ADDX: dst = src1 + src2 + icc.C, condition codes unchanged. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}
447 
/* ADDXcc: add with icc.C carry-in, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
452 
/* ADDXC: dst = src1 + src2 + cc.C, condition codes unchanged. */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}
458 
/* ADDXCcc: add with cc.C carry-in, setting condition codes. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
463 
464 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
465 {
466     TCGv z = tcg_constant_tl(0);
467 
468     if (cin) {
469         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
470         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
471     } else {
472         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
473     }
474     tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
475     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
476     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
477     tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
478 #ifdef TARGET_SPARC64
479     tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
480     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
481 #endif
482     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
483     tcg_gen_mov_tl(dst, cpu_cc_N);
484 }
485 
/* SUBcc: subtract setting condition codes, no borrow-in. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
490 
/*
 * TSUBcc: tagged subtract.  In addition to normal overflow, icc.V is
 * set if either operand has nonzero tag bits (the low two bits).
 */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V: nonzero tags force all-ones. */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
506 
/* SUBX: dst = src1 - src2 - icc.C, condition codes unchanged. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}
512 
/* SUBXcc: subtract with icc.C borrow-in, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
517 
/*
 * MULScc: one step of the 32-bit multiply primitive.
 *   - src2 is replaced by zero if Y.bit0 is clear;
 *   - %y is shifted right one bit with src1.bit0 entering bit 31;
 *   - src1 is shifted right one bit with (icc.N ^ icc.V) entering bit 31;
 *   - the two adjusted operands are added, setting the condition codes.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
554 
/*
 * 32x32->64 multiply for UMUL/SMUL.  The low 32 bits of the product go
 * to dst (the full 64-bit product on a 64-bit target) and the high
 * 32 bits go to %y.  sign_ext selects signed vs unsigned extension of
 * the truncated 32-bit operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    /* TCGv is TCGv_i64 here, so the _i64 ops apply directly. */
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
579 
/* UMUL: unsigned 32x32->64 multiply; high half goes to %y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
585 
/* SMUL: signed 32x32->64 multiply; high half goes to %y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
591 
592 static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
593 {
594     TCGv discard = tcg_temp_new();
595     tcg_gen_mulu2_tl(discard, dst, src1, src2);
596 }
597 
/* FPMADDX: dst = src3 + src1 * src2 (low 64 bits only). */
static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}
606 
/*
 * FPMADDXHI: dst = high 64 bits of the 128-bit value
 * (src1 * src2 + src3), with unsigned multiply and add.
 */
static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}
617 
/*
 * SDIV: signed 32-bit divide via helper.  Only the low 32 bits of the
 * helper result (the quotient) are kept; sign-extended on sparc64.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
629 
/*
 * UDIVcc: unsigned divide setting condition codes.  The helper returns
 * the quotient in the low 32 bits of t64 and the V flag in the high
 * 32 bits.  On sparc64, cpu_cc_V doubles as the 64-bit scratch.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    /* Unpack quotient into N and V; icc mirrors the result, C clear. */
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
654 
/*
 * SDIVcc: signed divide setting condition codes.  Same layout as
 * gen_op_udivcc (quotient in low half, V flag in high half of t64),
 * but the quotient is sign-extended.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    /* Unpack quotient into N and V; icc mirrors the result, C clear. */
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
679 
/* TADDccTV: tagged add, trap-on-overflow variant; all work in the helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
684 
/* TSUBccTV: tagged subtract, trap-on-overflow variant; all work in the helper. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
689 
/* POPC: population count of src2; src1 is unused by the instruction. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
694 
/* LZCNT: count leading zeros; src == 0 yields TARGET_LONG_BITS. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
699 
#ifndef TARGET_SPARC64
/* Stub: the array8 helper exists only on sparc64; never reached here. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
706 
/* ARRAY16: same blocking as ARRAY8 with the index scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
712 
/* ARRAY32: same blocking as ARRAY8 with the index scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
718 
/* VIS FPACK16 via helper, using %gsr for the scale factor (sparc64 only). */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
727 
/* VIS FPACKFIX via helper, using %gsr for the scale factor (sparc64 only). */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
736 
/* VIS FPACK32 via helper, using %gsr for the scale factor (sparc64 only). */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
745 
/*
 * FPADDS16S: signed saturating add of the two 16-bit lanes packed
 * in each 32-bit operand, lane by lane.
 */
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        /* Clamp the 17-bit sum into the signed 16-bit range. */
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
763 
/*
 * FPSUBS16S: signed saturating subtract of the two 16-bit lanes packed
 * in each 32-bit operand, lane by lane.
 */
static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        /* Clamp the 17-bit difference into the signed 16-bit range. */
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
781 
/*
 * FPADDS32S: signed saturating 32-bit add.
 * v < 0 iff the add overflowed.  t is the saturation value:
 * INT32_MAX when the wrapped result went negative (positive overflow),
 * INT32_MIN otherwise (INT32_MAX + 1 wraps to INT32_MIN).
 */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
799 
/*
 * FPSUBS32S: signed saturating 32-bit subtract.
 * Same saturation scheme as gen_op_fpadds32s, with the overflow
 * predicate adjusted for subtraction.
 */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
817 
/*
 * FALIGNDATA core: extract 8 bytes from the 16-byte concatenation
 * s1:s2 starting at the byte offset given by the low 3 bits of gsr.
 * Sparc64 only.
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Byte offset scaled to a bit shift. */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
845 
/* FALIGNDATA using the architectural %gsr alignment field. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}
850 
/* VIS BSHUFFLE via helper, using %gsr for the byte mask (sparc64 only). */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
859 
/* PDISTN: pixel distance with a zero accumulator (non-accumulating form). */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}
868 
/*
 * FMUL8x16AL: multiply by the signed 16-bit value in the LOW half of
 * src2, sign-extended in place.  Note: clobbers src2.
 */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}
874 
/*
 * FMUL8x16AU: multiply by the signed 16-bit value in the HIGH half of
 * src2, brought down with an arithmetic shift.  Note: clobbers src2.
 */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
880 
/*
 * FMULD8ULX16: multiply the unsigned low byte of each 16-bit lane of
 * src1 by the signed 16-bit lane of src2, producing two 32-bit
 * products packed into dst (low lane in the low half).
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* Low lane: bits 0-7 of src1 (unsigned) * bits 0-15 of src2 (signed). */
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* High lane: bits 16-23 of src1 * bits 16-31 of src2. */
    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
897 
/*
 * FMULD8SUX16: multiply the signed high byte (scaled by 256) of each
 * 16-bit lane of src1 by the signed 16-bit lane of src2, producing
 * two 32-bit products packed into dst.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* Same trick for the high lane. */
    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
922 
923 #ifdef TARGET_SPARC64
/* Vector expansion of FCHKSM16: 16-bit add with end-around carry. */
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}
935 
/*
 * gvec expander for FCHKSM16: vector implementation when the host
 * supports the required ops, 64-bit helper fallback otherwise.
 */
static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
950 
/*
 * Vector expansion of FMEAN16: per 16-bit lane, compute
 * (a >> 1) + (b >> 1) + ((a | b) & 1) — a rounded mean that avoids
 * overflow of the intermediate sum.  Note: clobbers src1 and src2.
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
963 
/*
 * gvec expansion for FMEAN16.  Uses the inline vector implementation
 * above when add/sari vector ops are available, otherwise the 64-bit
 * gen_helper_fmean16 scalar helper.
 */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
978 #else
979 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
980 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
981 #endif
982 
983 static void finishing_insn(DisasContext *dc)
984 {
985     /*
986      * From here, there is no future path through an unwinding exception.
987      * If the current insn cannot raise an exception, the computation of
988      * cpu_cond may be able to be elided.
989      */
990     if (dc->cpu_cond_live) {
991         tcg_gen_discard_tl(cpu_cond);
992         dc->cpu_cond_live = false;
993     }
994 }
995 
/*
 * dc->npc == JUMP_PC: the branch condition is still live.  Select
 * between the two possible npc values (jump_pc[0] when the stored
 * comparison holds, jump_pc[1] otherwise) with a movcond.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
1004 
/*
 * Call this function before using or clobbering the condition register,
 * as it may still be needed by a pending conditional jump (JUMP_PC):
 * resolve that jump into cpu_npc now.
 */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
1014 
/* Materialize the (possibly symbolic) dc->npc into the cpu_npc global. */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Still condition-dependent: resolve via movcond. */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Nothing to emit: cpu_npc already holds the value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* npc known at translation time. */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1033 
/* Write the architectural pc/npc, e.g. before a helper that may trap. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1039 
/* Raise exception WHICH at the current pc/npc and terminate the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1047 
1048 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1049 {
1050     DisasDelayException *e = g_new0(DisasDelayException, 1);
1051 
1052     e->next = dc->delay_excp_list;
1053     dc->delay_excp_list = e;
1054 
1055     e->lab = gen_new_label();
1056     e->excp = excp;
1057     e->pc = dc->pc;
1058     /* Caller must have used flush_cond before branch. */
1059     assert(e->npc != JUMP_PC);
1060     e->npc = dc->npc;
1061 
1062     return e->lab;
1063 }
1064 
/* As delay_exceptionv, for a compile-time-constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1069 
/*
 * Emit a run-time check that ADDR has none of the MASK bits set;
 * if it does, branch to a delayed TT_UNALIGNED exception.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    /* delay_exceptionv requires a resolved npc. */
    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
1081 
/* Advance pc to npc (delay-slot step), handling symbolic npc values. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending conditional npc first. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* npc known at translation time; no code emitted. */
        dc->pc = dc->npc;
    }
}
1105 
/*
 * Fill in CMP for integer condition COND (bcc/tcc encoding) evaluated
 * against the 32-bit icc (xcc=false) or 64-bit xcc (xcc=true) condition
 * codes, which are stored unpacked in cpu_cc_{N,V,Z,C}.  Bit 3 of COND
 * selects the negated sense of the low three bits.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* 32-bit carry is kept in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1204 
/*
 * Fill in CMP for floating-point condition COND (fbcc encoding)
 * evaluated against the 2-bit condition field cpu_fcc[CC].
 * Bit 3 of COND selects the negated sense of the low three bits.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* both values have bit 0 set */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1264 
1265 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1266 {
1267     static const TCGCond cond_reg[4] = {
1268         TCG_COND_NEVER,  /* reserved */
1269         TCG_COND_EQ,
1270         TCG_COND_LE,
1271         TCG_COND_LT,
1272     };
1273     TCGCond tcond;
1274 
1275     if ((cond & 3) == 0) {
1276         return false;
1277     }
1278     tcond = cond_reg[cond & 3];
1279     if (cond & 4) {
1280         tcond = tcg_invert_cond(tcond);
1281     }
1282 
1283     cmp->cond = tcond;
1284     cmp->c1 = tcg_temp_new();
1285     cmp->c2 = 0;
1286     tcg_gen_mov_tl(cmp->c1, r_src);
1287     return true;
1288 }
1289 
/* Clear the FSR cexc and ftt fields (stored combined in fsr_cexc_ftt). */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1295 
/* FMOVs: copy a single-precision register, clearing pending cexc/ftt. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1301 
/* FNEGs: flip the sign bit (bit 31) of a single-precision value. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1307 
/* FABSs: clear the sign bit (bit 31) of a single-precision value. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1313 
/* FMOVd: copy a double-precision register, clearing pending cexc/ftt. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1319 
/* FNEGd: flip the sign bit (bit 63) of a double-precision value. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1325 
/* FABSd: clear the sign bit (bit 63) of a double-precision value. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1331 
/* FNEGq: flip the sign bit, held in the high half of the 128-bit value. */
static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}
1341 
/* FABSq: clear the sign bit, held in the high half of the 128-bit value. */
static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}
1351 
/* d = s1 * s2 + s3, single-precision fused multiply-add. */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1356 
/* d = s1 * s2 + s3, double-precision fused multiply-add. */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1361 
/* d = s1 * s2 - s3 (negate_c negates the addend), single-precision. */
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1367 
/* d = s1 * s2 - s3 (negate_c negates the addend), double-precision. */
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1373 
/* d = -(s1 * s2 - s3), single-precision. */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1379 
/* d = -(s1 * s2 - s3), double-precision. */
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1385 
/* d = -(s1 * s2 + s3), single-precision. */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1391 
/* d = -(s1 * s2 + s3), double-precision. */
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1397 
/*
 * Use muladd to compute ((1 * src1) + src2) / 2 with one rounding:
 * float_muladd_halve_result halves the fused result before rounding.
 */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1405 
/* Double-precision counterpart of gen_op_fhadds: ((1*s1) + s2) / 2. */
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1412 
/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1420 
/* Double-precision counterpart of gen_op_fhsubs: ((1*s1) - s2) / 2. */
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1427 
/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1435 
/* Double-precision counterpart of gen_op_fnhadds: -(((1*s1) + s2) / 2). */
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1442 
/* Raise TT_FP_EXCP with FSR.ftt set to FTT, terminating the TB. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1454 
/*
 * Raise TT_NFPU_INSN if the FPU is disabled; returns nonzero when the
 * trap was generated (caller must then abandon the insn).  In user-only
 * builds the FPU is always treated as enabled.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1465 
1466 /* asi moves */
typedef enum {
    GET_ASI_HELPER,   /* fall back to the ld_asi/st_asi helpers */
    GET_ASI_EXCP,     /* an exception has already been generated */
    GET_ASI_DIRECT,   /* direct qemu_ld/st via the chosen mmu index */
    GET_ASI_DTWINX,   /* TWINX/QUAD_LDD 128-bit twin access (v9) */
    GET_ASI_CODE,     /* instruction-space access (sparc32 softmmu) */
    GET_ASI_BLOCK,    /* BLK_* block load/store asis */
    GET_ASI_SHORT,    /* FL8/FL16 short floating-point access */
    GET_ASI_BCOPY,    /* ASI_M_BCOPY 32-byte block copy (sparc32) */
    GET_ASI_BFILL,    /* ASI_M_BFILL block fill (sparc32) */
} ASIType;
1478 
typedef struct {
    ASIType type;   /* dispatch class; see ASIType */
    int asi;        /* resolved asi number (dc->asi when insn used %asi) */
    int mem_idx;    /* mmu index for direct accesses */
    MemOp memop;    /* size/endianness, possibly overridden per asi */
} DisasASI;
1485 
/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: select the mmu index implied by the asi. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second switch: select the access style. */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1717 
1718 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * sparc32 user-only has no ld_asi helper; this stub satisfies the
 * references below, which are unreachable in that configuration.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1724 
/* As above: unreachable stub for sparc32 user-only builds. */
static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1730 #endif
1731 
/* Emit an integer load through ASI descriptor DA: DST = *(ADDR). */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; expose pc/npc. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; narrow to the target. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1777 
/* Emit an integer store through ASI descriptor DA: *(ADDR) = SRC. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte boundary. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; expose pc/npc. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1856 
/* SWAP through an ASI: atomically exchange SRC with *(ADDR) into DST. */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1873 
/* CASA/CASXA through an ASI: compare-and-swap at ADDR, old value in OLDV. */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1890 
/* LDSTUB through an ASI: atomically read a byte and store 0xff into it. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Non-atomic helper sequence is invalid under parallel TCG. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1925 
/*
 * LDF(A)/LDDF(A)/LDQF(A): load a single/double/quad floating-point
 * register (rd) from ADDR using the ASI described by DA.
 * ORIG_SIZE is the pre-adjustment access size, used to validate the
 * block/short variants which are only legal for lddfa.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Quad load emulated as two 64-bit loads (see TODO above). */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            /* 64-byte block load: eight consecutive 8-byte loads. */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2046 
/*
 * STF(A)/STDF(A)/STQF(A): store a single/double/quad floating-point
 * register (rd) to ADDR using the ASI described by DA.
 * ORIG_SIZE is the pre-adjustment access size, used to validate the
 * block/short variants which are only legal for stdfa.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* 64-byte block store: eight consecutive 8-byte stores. */
            for (int i = 0; ; ++i) {
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2132 
/*
 * LDDA: load a doubleword into the register pair rd/rd+1, or a 128-bit
 * twin load for the sparc64 DTWINX asis.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Instruction-space access must go through the code helper. */
            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2228 
/*
 * STDA: store the register pair rd/rd+1 as one doubleword, with special
 * handling for the sparc64 twinx and the sparc32 block-fill asis.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN)
;
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to 16 bytes, then store it twice
               to the 32-byte-aligned destination. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2319 
/* FMOVScc: conditional move of a single-precision FP register (64-bit only). */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (cond != 0) ? freg[rs] : freg[rd] */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2345 
2346 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2347 {
2348 #ifdef TARGET_SPARC64
2349     TCGv_i64 dst = tcg_temp_new_i64();
2350     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2351                         gen_load_fpr_D(dc, rs),
2352                         gen_load_fpr_D(dc, rd));
2353     gen_store_fpr_D(dc, rd, dst);
2354 #else
2355     qemu_build_not_reached();
2356 #endif
2357 }
2358 
2359 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2360 {
2361 #ifdef TARGET_SPARC64
2362     TCGv c2 = tcg_constant_tl(cmp->c2);
2363     TCGv_i64 h = tcg_temp_new_i64();
2364     TCGv_i64 l = tcg_temp_new_i64();
2365 
2366     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2367                         gen_load_fpr_D(dc, rs),
2368                         gen_load_fpr_D(dc, rd));
2369     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2370                         gen_load_fpr_D(dc, rs + 2),
2371                         gen_load_fpr_D(dc, rd + 2));
2372     gen_store_fpr_D(dc, rd, h);
2373     gen_store_fpr_D(dc, rd + 2, l);
2374 #else
2375     qemu_build_not_reached();
2376 #endif
2377 }
2378 
#ifdef TARGET_SPARC64
/* Compute a pointer to env->ts[env->tl & MAXTL_MASK] into R_TSPTR. */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2402 
2403 static int extract_dfpreg(DisasContext *dc, int x)
2404 {
2405     int r = x & 0x1e;
2406 #ifdef TARGET_SPARC64
2407     r |= (x & 1) << 5;
2408 #endif
2409     return r;
2410 }
2411 
2412 static int extract_qfpreg(DisasContext *dc, int x)
2413 {
2414     int r = x & 0x1c;
2415 #ifdef TARGET_SPARC64
2416     r |= (x & 1) << 5;
2417 #endif
2418     return r;
2419 }
2420 
2421 /* Include the auto-generated decoder.  */
2422 #include "decode-insns.c.inc"
2423 
2424 #define TRANS(NAME, AVAIL, FUNC, ...) \
2425     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2426     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2427 
2428 #define avail_ALL(C)      true
2429 #ifdef TARGET_SPARC64
2430 # define avail_32(C)      false
2431 # define avail_ASR17(C)   false
2432 # define avail_CASA(C)    true
2433 # define avail_DIV(C)     true
2434 # define avail_MUL(C)     true
2435 # define avail_POWERDOWN(C) false
2436 # define avail_64(C)      true
2437 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2438 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2439 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2440 # define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
2441 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2442 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2443 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2444 # define avail_VIS3B(C)   avail_VIS3(C)
2445 # define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
2446 #else
2447 # define avail_32(C)      true
2448 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2449 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2450 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2451 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2452 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2453 # define avail_64(C)      false
2454 # define avail_FMAF(C)    false
2455 # define avail_GL(C)      false
2456 # define avail_HYPV(C)    false
2457 # define avail_IMA(C)     false
2458 # define avail_VIS1(C)    false
2459 # define avail_VIS2(C)    false
2460 # define avail_VIS3(C)    false
2461 # define avail_VIS3B(C)   false
2462 # define avail_VIS4(C)    false
2463 #endif
2464 
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* npc holds one of the special DYNAMIC_PC/JUMP_PC marker values. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime: pc <- npc, npc <- npc + 4. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are statically known; advance sequentially. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2505 
2506 /*
2507  * Major opcodes 00 and 01 -- branches, call, and sethi
2508  */
2509 
/*
 * Complete a conditional branch insn: DISP is the word displacement from
 * the current pc, ANNUL is the annul bit (skip the delay slot when the
 * branch does not go the "expected" way).  Handles the always/never
 * conditions specially, and otherwise either ends the TB (annulling) or
 * records the pending condition in dc->jump for the delay-slot insn.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul skips the delay slot entirely. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        /* Branch-never: fall through, optionally annulling the delay slot. */
        npc = dc->npc;
        if (npc & 3) {
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Delay slot executed only on the taken path; branch now. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* npc only known at runtime: select dest via movcond. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Record the pending branch for resolution after the delay
               slot insn (see JUMP_PC in advance_pc). */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2589 
/* Raise a privileged-instruction trap; always consumes the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2595 
/* Raise an FP exception with FTT = unimplemented FPop; consumes the insn. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2601 
2602 static bool gen_trap_float128(DisasContext *dc)
2603 {
2604     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2605         return false;
2606     }
2607     return raise_unimpfpop(dc);
2608 }
2609 
/* Conditional branch on integer condition codes (Bicc / BPcc). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2620 
/* Conditional branch on floating-point condition codes (FBfcc / FBPfcc). */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* The FPU must be enabled for fp branches. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2634 
2635 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2636 {
2637     DisasCompare cmp;
2638 
2639     if (!avail_64(dc)) {
2640         return false;
2641     }
2642     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2643         return false;
2644     }
2645     return advance_jump_cond(dc, &cmp, a->a, a->i);
2646 }
2647 
/* CALL: pc-relative jump; the address of the CALL itself goes in %o7 (r15). */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2657 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false lets the decoder fall through to illegal-insn. */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2671 
static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop.  */
    if (a->rd) {
        /* SETHI places the 22-bit immediate in bits [31:10] of rd. */
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
2680 
2681 /*
2682  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2683  */
2684 
/*
 * Tcc: conditional software trap.  The trap number is rs1 plus either an
 * immediate or rs2, masked by the cpu-appropriate trap mask and offset by
 * TT_TRAP.  COND 0 is trap-never; COND 8 is trap-always.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Delay the exception to a common slot at the end of the TB. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2737 
2738 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2739 {
2740     if (avail_32(dc) && a->cc) {
2741         return false;
2742     }
2743     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2744 }
2745 
/* v7/v8 immediate form of Ticc; not present on 64-bit cpus. */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}
2753 
/* v9 immediate form of Tcc, with selectable condition-code register. */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2761 
/* STBAR: store barrier — order earlier stores before later stores. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2767 
/* MEMBAR (v9 only): memory barrier with separate mmask/cmask fields. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2783 
/*
 * Common helper for reading a special/ancillary state register into rd.
 * FUNC is given a destination temp and returns the value to store (which
 * may be that temp or another TCGv).  When PRIV is false, raises a
 * privileged-instruction trap instead.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2793 
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    /* %y lives in a TCG global; the dst temp is not needed. */
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2811 
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    /* LEON3 %asr17 processor configuration register, read via helper. */
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    /* %ccr is assembled by the helper from the cpu's flag state. */
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
    /* %asi is tracked at translation time, so it is a constant here. */
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2838 
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    /* Read the %tick counter through its timer object pointer. */
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        /* translator_io_start signals the TB must end after this insn. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    /* %pc of the current insn is a translation-time constant. */
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2861 
/* Widen the 32-bit cpu_fprs global into the target-long destination. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* %gsr requires the FPU to be enabled; the value itself is a global temp. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* %softint is a 32-bit env field, loaded sign-extended. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* %tick_cmpr is a plain env field; no timer access needed to read it. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* As do_rdtick, but for the system tick (%stick). */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * The HYPV check may not be enough: UA2005 & UA2007 describe
 * this ASR as implementation dependent.  Always reads as 1 here.
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2931 
/* sparc32 %psr is assembled from env state by the helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

/* Hyperprivileged %hpstate, stored directly in env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/*
 * %htstate is indexed by the current trap level: compute
 * env + (tl & MAXTL_MASK) * 8 and load from the htstate array there
 * (each entry is 8 bytes, hence the shift by 3).
 */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2997 
/* sparc32 window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* Read %tpc from the trap state stack entry for the current TL. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* Read %tnpc from the trap state for the current TL. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* Read %tstate from the trap state for the current TL. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* Read the 32-bit %tt (trap type) field, sign-extended. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

/* Both sparc32 RDTBR and v9 RDPR %tba map onto the cpu_tbr global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3074 
/* 32-bit env fields below are loaded sign-extended into target-long. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* %pil is shared between sparc32 and sparc64 (plain env field). */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* The current window pointer is derived by a helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* %gl (global level) exists only with the GL feature. */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3172 
3173 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3174 {
3175     if (avail_64(dc)) {
3176         gen_helper_flushw(tcg_env);
3177         return advance_pc(dc);
3178     }
3179     return false;
3180 }
3181 
/*
 * Common handler for write-state-register insns (WRY, WRPSR, WRPR...).
 * The architected source operand is rs1 ^ (rs2 or simm13); PRIV gates
 * the access, FUNC performs the register-specific write.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* rs1 is %g0: the xor degenerates to the constant operand. */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* x ^ 0 == x: pass rs1 through unmodified. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3213 
/* %y holds only 32 significant bits; zero-extend on write. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* The helper unpacks %ccr back into the split cpu_cc_* state. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* %asi is 8 bits wide; store truncated into the 32-bit env field. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* Changing %fprs invalidates translation-time FPRS tracking: end TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3260 
/* WRSOFTINT_SET / _CLR / plain write each map to a dedicated helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

/* Store the new compare value, then reprogram the tick timer limit. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3295 
3296 static void do_wrstick(DisasContext *dc, TCGv src)
3297 {
3298 #ifdef TARGET_SPARC64
3299     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3300 
3301     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3302     translator_io_start(&dc->base);
3303     gen_helper_tick_set_count(r_tickptr, src);
3304     /* End TB to handle timer interrupt */
3305     dc->base.is_jmp = DISAS_EXIT;
3306 #else
3307     qemu_build_not_reached();
3308 #endif
3309 }
3310 
3311 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3312 
/* Store the new compare value, then reprogram the stick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

/* The helper halts the cpu; state must be synced before it runs. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3335 
/* WRPSR can change mode/interrupt state; end the TB afterwards. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* Only bits for implemented windows are writable in %wim. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3354 
/* Write %tpc into the trap state entry for the current TL. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* Write %tnpc into the trap state for the current TL. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* Write %tstate into the trap state for the current TL. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* %tt is a 32-bit field; store truncated. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3410 
/* Set the %tick counter; timer access is I/O under icount. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* %tba lives in a global TCG temp. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3430 
/*
 * WRPR %pstate may change translation assumptions (e.g. privilege,
 * address masking); sync state first and force npc to be recomputed.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* Changing %tl switches the visible trap state; npc becomes dynamic. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* %pil affects interrupt delivery, which is I/O under icount. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3461 
/* The helper validates and applies the new window pointer. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* The register-window bookkeeping fields are plain 32-bit env stores. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* The helper validates and applies the new global level. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 WRTBR reuses the v9 %tba writer. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3520 
/* %hpstate can change privilege-related state; end the TB. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/*
 * Store into htstate[tl]: mirror of do_rdhtstate — index the env
 * array by (tl & MAXTL_MASK) * 8 bytes.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* Store the compare value, then reprogram the hstick timer limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3573 
3574 static bool do_saved_restored(DisasContext *dc, bool saved)
3575 {
3576     if (!supervisor(dc)) {
3577         return raise_priv(dc);
3578     }
3579     if (saved) {
3580         gen_helper_saved(tcg_env);
3581     } else {
3582         gen_helper_restored(tcg_env);
3583     }
3584     return advance_pc(dc);
3585 }
3586 
3587 TRANS(SAVED, 64, do_saved_restored, true)
3588 TRANS(RESTORED, 64, do_saved_restored, false)
3589 
/* NOP: nothing to emit; just step the pc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3601 
/*
 * Common code generator for the r_r_ri_cc arithmetic/logical formats.
 * FUNC is the reg-reg form; FUNCI the reg-imm form (may be NULL, in
 * which case the immediate is materialized for FUNC).  With LOGIC_CC
 * the result is computed directly into cpu_cc_N and the flags are set
 * as a logical insn does: Z mirrors the result, C and V are cleared.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute the result straight into the N flag register. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Keep the 32-bit %icc view in sync. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3644 
/* Arithmetic ops: pick the flag-setting variant when the cc bit is set. */
static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

/* Logical ops: same generator, cc handled via the logic_cc fast path. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3689 
3690 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3691 {
3692     /* OR with %g0 is the canonical alias for MOV. */
3693     if (!a->cc && a->rs1 == 0) {
3694         if (a->imm || a->rs2_or_imm == 0) {
3695             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3696         } else if (a->rs2_or_imm & ~0x1f) {
3697             /* For simplicity, we under-decoded the rs2 form. */
3698             return false;
3699         } else {
3700             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3701         }
3702         return advance_pc(dc);
3703     }
3704     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3705 }
3706 
/*
 * UDIV: unsigned 64 / 32 division with the dividend formed as Y:rs1.
 * Quotients that do not fit in 32 bits saturate to UINT32_MAX.
 * A register divisor of zero raises a (delayed) division trap.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Division by a zero constant traps unconditionally. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        /* Branch to the delayed trap if the run-time divisor is zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* 64-bit dividend: rs1 in the low half, %y in the high half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate on overflow of the 32-bit result. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3759 
/* UDIVX: v9 64-bit unsigned divide; zero divisor raises a division trap. */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Division by a zero constant traps unconditionally. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        /* Branch to the delayed trap if the run-time divisor is zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3797 
/*
 * SDIVX: v9 64-bit signed divide.  Zero divisor raises a division
 * trap; INT64_MIN / -1 (which would trap on e.g. x86 hosts) is
 * special-cased to produce the architected wrapped result.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Division by a zero constant traps unconditionally. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x, including INT64_MIN / -1 == INT64_MIN. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Branch to the delayed trap if the run-time divisor is zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3854 
/*
 * Translate the VIS EDGE* family: compute an edge mask for a
 * partially-aligned block, for elements of the given width (8, 16 or
 * 32 bits) within an 8-byte block.  The cc forms also set the integer
 * condition codes from rs1 - rs2; little_endian reverses the mask.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        /* EDGE*cc: condition codes from the subtraction s1 - s2. */
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /*
     * L = element index of s1 within its 8-byte block;
     * R = element index of s2, inverted to count from the other end;
     * M = all-ones mask covering 8/width mask bits.
     */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /*
     * Compute dst = (s1 == s2 under amask ? l & r : l): the right edge
     * only applies when both addresses fall in the same 8-byte block.
     * TSTEQ tests ((s1 ^ s2) & amask) == 0; r already holds r & l.
     */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

/* The VIS2 *N forms are identical except they leave the cc unchanged. */
TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3933 
3934 static bool do_rr(DisasContext *dc, arg_r_r *a,
3935                   void (*func)(TCGv, TCGv))
3936 {
3937     TCGv dst = gen_dest_gpr(dc, a->rd);
3938     TCGv src = gen_load_gpr(dc, a->rs);
3939 
3940     func(dst, src);
3941     gen_store_gpr(dc, a->rd, dst);
3942     return advance_pc(dc);
3943 }
3944 
3945 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3946 
3947 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3948                    void (*func)(TCGv, TCGv, TCGv))
3949 {
3950     TCGv dst = gen_dest_gpr(dc, a->rd);
3951     TCGv src1 = gen_load_gpr(dc, a->rs1);
3952     TCGv src2 = gen_load_gpr(dc, a->rs2);
3953 
3954     func(dst, src1, src2);
3955     gen_store_gpr(dc, a->rd, dst);
3956     return advance_pc(dc);
3957 }
3958 
/* VIS1 array-address computations and VIS3 extended arithmetic. */
TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
3967 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low 3 bits of the
 * unmasked sum deposited into GSR bits 2:0 (the align field).
 * sparc64-only: the decode tables never reach this on sparc32.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/*
 * ALIGNADDRESS_LITTLE: as above, but the negated sum supplies the
 * GSR align bits.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* GSR.align gets the low bits of -(s1 + s2). */
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/*
 * BMASK: dst = s1 + s2, with the sum also deposited into GSR
 * bits 63:32 (the mask field).  sparc64-only, as above.
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4009 
/*
 * CMASK8/16/32: combine GSR with the value of rs2 via the given
 * helper, which reads and rewrites cpu_gsr in place.
 *
 * NOTE(review): unlike the sibling wrappers this returns true without
 * calling advance_pc() -- confirm the pc/npc advance for these insns
 * is handled elsewhere.
 */
static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
{
    func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
    return true;
}

TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4019 
/*
 * Translate the register-count shifts SLL/SRL/SRA.
 * l selects a left shift, u an unsigned (logical) right shift;
 * a->x selects the 64-bit form (sparc64 only).
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    /* Only the low 5 (32-bit) or 6 (64-bit) bits of the count apply. */
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* Keep only the 32-bit result in the 64-bit register. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so only 32 bits shift in from above. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first for a 32-bit arithmetic shift. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4059 
/*
 * Translate the immediate-count shifts.  As for do_shift_r, but a
 * 32-bit shift by constant on sparc64 can be done in one TCG op.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on a 64-bit register: shift and (zero/sign)
         * extend in one op.  E.g. deposit_z places bits [31-i:0] at
         * position i with zeros elsewhere == shift-left + ext32u.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4096 
4097 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4098 {
4099     /* For simplicity, we under-decoded the rs2 form. */
4100     if (!imm && rs2_or_imm & ~0x1f) {
4101         return NULL;
4102     }
4103     if (imm || rs2_or_imm == 0) {
4104         return tcg_constant_tl(rs2_or_imm);
4105     } else {
4106         return cpu_regs[rs2_or_imm];
4107     }
4108 }
4109 
/*
 * Conditionally move src2 into rd: rd = cmp ? src2 : rd.
 * The destination is *loaded* (not gen_dest_gpr) because its old
 * value must be preserved when the condition is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4119 
4120 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4121 {
4122     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4123     DisasCompare cmp;
4124 
4125     if (src2 == NULL) {
4126         return false;
4127     }
4128     gen_compare(&cmp, a->cc, a->cond, dc);
4129     return do_mov_cond(dc, &cmp, a->rd, src2);
4130 }
4131 
4132 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4133 {
4134     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4135     DisasCompare cmp;
4136 
4137     if (src2 == NULL) {
4138         return false;
4139     }
4140     gen_fcompare(&cmp, a->cc, a->cond);
4141     return do_mov_cond(dc, &cmp, a->rd, src2);
4142 }
4143 
4144 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4145 {
4146     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4147     DisasCompare cmp;
4148 
4149     if (src2 == NULL) {
4150         return false;
4151     }
4152     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4153         return false;
4154     }
4155     return do_mov_cond(dc, &cmp, a->rd, src2);
4156 }
4157 
/*
 * Common translator for insns operating on "rs1 + rs2/simm":
 * JMPL, RETT, RETURN, SAVE, RESTORE.  Computes the sum and hands
 * it to the per-insn callback.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4182 
/*
 * JMPL: jump to rs1 + rs2/simm, writing the address of the JMPL
 * insn itself into rd.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    /* The target must be 4-byte aligned. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    /* The jump target is dynamic; end the TB with a lookup. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4203 
/*
 * RETT (sparc32 only): return from trap.  Privileged; jumps to
 * rs1 + rs2/simm, with the remaining state change (and any traps)
 * performed by the rett helper.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4221 
/*
 * RETURN (sparc64): restore the previous register window and jump
 * to rs1 + rs2/simm.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    /* Check target alignment before the window restore runs. */
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4236 
/*
 * SAVE: advance to a new register window, then write the sum
 * (computed in the old window by do_add_special) into rd of the
 * new window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

/* RESTORE: as SAVE, but returning to the previous register window. */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4254 
/*
 * DONE/RETRY (sparc64): privileged return from a trap level.
 * pc and npc are rewritten by the helper, so both are marked
 * dynamic before ending the TB.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* Allow icount-accurate I/O within the helper. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4273 
4274 /*
4275  * Major opcode 11 -- load and store instructions
4276  */
4277 
/*
 * Compute the effective address rs1 + rs2/simm for a load or store.
 * Returns NULL when the under-decoded rs2 field has reserved bits
 * set.  With 32-bit addressing (AM_CHECK) the result is
 * zero-extended to 32 bits.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Extend into a temp so we never clobber a register. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4306 
/* Common translator for the integer load insns. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4330 
4331 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4332 {
4333     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4334     DisasASI da;
4335 
4336     if (addr == NULL) {
4337         return false;
4338     }
4339     da = resolve_asi(dc, a->asi, mop);
4340 
4341     reg = gen_load_gpr(dc, a->rd);
4342     gen_st_asi(dc, &da, reg, addr);
4343     return advance_pc(dc);
4344 }
4345 
/* Integer stores of 32/8/16/64 bits. */
TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4350 
4351 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4352 {
4353     TCGv addr;
4354     DisasASI da;
4355 
4356     if (a->rd & 1) {
4357         return false;
4358     }
4359     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4360     if (addr == NULL) {
4361         return false;
4362     }
4363     da = resolve_asi(dc, a->asi, MO_TEUQ);
4364     gen_ldda_asi(dc, &da, addr, a->rd);
4365     return advance_pc(dc);
4366 }
4367 
4368 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4369 {
4370     TCGv addr;
4371     DisasASI da;
4372 
4373     if (a->rd & 1) {
4374         return false;
4375     }
4376     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4377     if (addr == NULL) {
4378         return false;
4379     }
4380     da = resolve_asi(dc, a->asi, MO_TEUQ);
4381     gen_stda_asi(dc, &da, addr, a->rd);
4382     return advance_pc(dc);
4383 }
4384 
4385 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4386 {
4387     TCGv addr, reg;
4388     DisasASI da;
4389 
4390     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4391     if (addr == NULL) {
4392         return false;
4393     }
4394     da = resolve_asi(dc, a->asi, MO_UB);
4395 
4396     reg = gen_dest_gpr(dc, a->rd);
4397     gen_ldstub_asi(dc, &da, reg, addr);
4398     gen_store_gpr(dc, a->rd, reg);
4399     return advance_pc(dc);
4400 }
4401 
4402 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4403 {
4404     TCGv addr, dst, src;
4405     DisasASI da;
4406 
4407     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4408     if (addr == NULL) {
4409         return false;
4410     }
4411     da = resolve_asi(dc, a->asi, MO_TEUL);
4412 
4413     dst = gen_dest_gpr(dc, a->rd);
4414     src = gen_load_gpr(dc, a->rd);
4415     gen_swap_asi(dc, &da, dst, src, addr);
4416     gen_store_gpr(dc, a->rd, dst);
4417     return advance_pc(dc);
4418 }
4419 
/*
 * CASA/CASXA: compare-and-swap.  The address is rs1 alone (note the
 * true/0 arguments: no displacement), the comparison value comes
 * from rs2, and rd both supplies the swap value and receives the
 * old memory contents.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4441 
/*
 * Common translator for FP loads of 32, 64 or 128 bits.
 * Checks FPU-disabled and, for quad, float128 availability before
 * emitting the access.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

/* The ASI forms exist only on sparc64. */
TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4469 
/* Common translator for FP stores of 32, 64 or 128 bits. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

/* The ASI forms exist only on sparc64. */
TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4496 
/*
 * STDFQ (sparc32 only, privileged): no FP deferred-trap queue is
 * modelled, so unconditionally raise an fp exception with the
 * sequence_error FTT code instead of storing anything.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4511 
/*
 * LDFSR: load the 32-bit FSR from memory.  fcc0 is kept unpacked in
 * cpu_fcc[0]; everything else (except fcc and FTT) goes through the
 * helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4533 
/*
 * LDXFSR: load the 64-bit FSR.  All four fcc fields are kept
 * unpacked in cpu_fcc[0-3]; fcc1-3 live in the high 32 bits of the
 * loaded value.  The 'entire' form (LDXEFSR, VIS3B) also updates
 * FTT via the helper.
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /* cpu_fcc[3] doubles as scratch for the high word: it is the
       last field extracted below, so the overwrite is harmless. */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4572 
4573 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4574 {
4575     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4576     TCGv fsr;
4577 
4578     if (addr == NULL) {
4579         return false;
4580     }
4581     if (gen_trap_ifnofpu(dc)) {
4582         return true;
4583     }
4584 
4585     fsr = tcg_temp_new();
4586     gen_helper_get_fsr(fsr, tcg_env);
4587     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4588     return advance_pc(dc);
4589 }
4590 
/* 32-bit FSR store everywhere; the 64-bit form is sparc64-only. */
TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4593 
4594 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4595 {
4596     if (gen_trap_ifnofpu(dc)) {
4597         return true;
4598     }
4599     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4600     return advance_pc(dc);
4601 }
4602 
/* Single-precision constant generators: all-zeros and all-ones. */
TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4605 
4606 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4607 {
4608     if (gen_trap_ifnofpu(dc)) {
4609         return true;
4610     }
4611     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4612     return advance_pc(dc);
4613 }
4614 
/* Double-precision constant generators: all-zeros and all-ones. */
TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4617 
/*
 * Unary 32-bit FP op without env access (cannot raise fp traps):
 * rd = func(rs).
 */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    /* tmp is reused as both source and destination. */
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4638 
/*
 * 64-bit source to 32-bit result, no env access:
 * rd(single) = func(rs(double)).
 */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4658 
/* Unary 32-bit FP op through an env helper (may raise fp traps). */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4677 
/* 64-bit source to 32-bit result through an env helper. */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4698 
/* Unary 64-bit FP op without env access: rd = func(rs). */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4720 
/* Unary 64-bit FP op through an env helper (may raise fp traps). */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4740 
/*
 * 32-bit source to 64-bit result, no env access:
 * rd(double) = func(rs(single)).
 */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4759 
/* 32-bit source to 64-bit result through an env helper. */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4780 
/*
 * Unary 128-bit move-class op: rd = func(rs).  These funcs take no
 * env pointer and so cannot trap; pending IEEE exception and FTT
 * state is cleared explicitly instead.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4803 
/* Unary 128-bit FP op through an env helper (may raise fp traps). */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4823 
/* 128-bit source to 32-bit result through an env helper. */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4846 
/* 128-bit source to 64-bit result through an env helper. */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4869 
/* 32-bit source to 128-bit result through an env helper. */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4892 
/* 64-bit source to 128-bit result through an env helper. */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4915 
/*
 * Binary 32-bit FP op without env access: rd = func(rs1, rs2).
 * src1 is reused as the destination temp.
 */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

/* VIS1 partitioned add/sub and bitwise ops on the single half. */
TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4953 
/*
 * Translate a two-operand single-precision FPop that goes through env
 * (and so may raise IEEE exceptions):
 * %f[rd] = func(env, %f[rs1], %f[rs2]).
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    /* The src1 temporary is reused as the destination.  */
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4976 
/*
 * Translate an op with two 32-bit float register sources and a
 * 64-bit float register destination, no env:
 * %d[rd] = func(%f[rs1], %f[rs2]).
 */
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5000 
/*
 * Translate an op with a 32-bit rs1, a 64-bit rs2 and a 64-bit
 * destination, no env: %d[rd] = func(%f[rs1], %d[rs2]).
 */
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5020 
/*
 * Translate a partitioned VIS op on 64-bit float registers through the
 * generic vector expander, operating element-wise at element size vece.
 * func receives env offsets, not temporaries; the trailing 8, 8 are
 * oprsz/maxsz, i.e. a single 64-bit register.
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

/* Saturating partitioned add/subtract.  */
TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

/* Partitioned shifts by per-element counts.  */
TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

/* Partitioned signed/unsigned min/max.  */
TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5077 
/*
 * Translate a two-operand op on 64-bit float registers, no env:
 * %d[rd] = func(%d[rs1], %d[rs2]).
 */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

/* Full-register logical ops.  */
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5119 
/*
 * Translate an op with two 64-bit float register sources whose result
 * is written to an integer register: %r[rd] = func(%d[rs1], %d[rs2]).
 * Used by the VIS partitioned compares, which produce a bitmask.
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
/* XMULX/XMULXHI operate on integer registers; routed through do_rrr.  */
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5162 
/*
 * Translate a two-operand double-precision FPop that goes through env
 * (and so may raise IEEE exceptions):
 * %d[rd] = func(env, %d[rs1], %d[rs2]).
 */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5186 
5187 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5188 {
5189     TCGv_i64 dst;
5190     TCGv_i32 src1, src2;
5191 
5192     if (gen_trap_ifnofpu(dc)) {
5193         return true;
5194     }
5195     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5196         return raise_unimpfpop(dc);
5197     }
5198 
5199     dst = tcg_temp_new_i64();
5200     src1 = gen_load_fpr_F(dc, a->rs1);
5201     src2 = gen_load_fpr_F(dc, a->rs2);
5202     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5203     gen_store_fpr_D(dc, a->rd, dst);
5204     return advance_pc(dc);
5205 }
5206 
5207 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5208 {
5209     TCGv_i64 dst;
5210     TCGv_i32 src1, src2;
5211 
5212     if (!avail_VIS3(dc)) {
5213         return false;
5214     }
5215     if (gen_trap_ifnofpu(dc)) {
5216         return true;
5217     }
5218     dst = tcg_temp_new_i64();
5219     src1 = gen_load_fpr_F(dc, a->rs1);
5220     src2 = gen_load_fpr_F(dc, a->rs2);
5221     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5222     gen_store_fpr_D(dc, a->rd, dst);
5223     return advance_pc(dc);
5224 }
5225 
/*
 * Translate a three-operand single-precision op (fused multiply-add
 * family), no env: %f[rd] = func(%f[rs1], %f[rs2], %f[rs3]).
 */
static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    src3 = gen_load_fpr_F(dc, a->rs3);
    dst = tcg_temp_new_i32();
    func(dst, src1, src2, src3);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5248 
/*
 * Translate a three-operand op on 64-bit float registers, no env:
 * %d[rd] = func(%d[rs1], %d[rs2], %d[rs3]).
 * Used for the double FMA family, PDIST and FPMADDX.
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5274 
/*
 * FALIGNDATAi (VIS4): align data with the shift amount taken from the
 * integer register rs1 rather than %gsr.  Note that rd is used both as
 * a source (first data operand) and as the destination.
 */
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5295 
/*
 * Translate a two-operand float128 FPop that goes through env
 * (and so may raise IEEE exceptions):
 * %q[rd] = func(env, %q[rs1], %q[rs2]).
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    /* The src1 temporary is reused as the destination.  */
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5319 
5320 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5321 {
5322     TCGv_i64 src1, src2;
5323     TCGv_i128 dst;
5324 
5325     if (gen_trap_ifnofpu(dc)) {
5326         return true;
5327     }
5328     if (gen_trap_float128(dc)) {
5329         return true;
5330     }
5331 
5332     src1 = gen_load_fpr_D(dc, a->rs1);
5333     src2 = gen_load_fpr_D(dc, a->rs2);
5334     dst = tcg_temp_new_i128();
5335     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5336     gen_store_fpr_Q(dc, a->rd, dst);
5337     return advance_pc(dc);
5338 }
5339 
/*
 * Translate FMOVR: conditionally move a float register based on the
 * contents of integer register rs1.  func performs the actual move of
 * the given width; is_128 additionally requires float128 support.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    /* An invalid register-condition encoding makes the insn illegal.  */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5363 
/*
 * Translate FMOVcc: conditionally move a float register based on an
 * integer condition code (a->cc selects which).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5385 
/*
 * Translate FMOVfcc: conditionally move a float register based on a
 * floating-point condition code (a->cc selects which %fcc).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5407 
5408 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5409 {
5410     TCGv_i32 src1, src2;
5411 
5412     if (avail_32(dc) && a->cc != 0) {
5413         return false;
5414     }
5415     if (gen_trap_ifnofpu(dc)) {
5416         return true;
5417     }
5418 
5419     src1 = gen_load_fpr_F(dc, a->rs1);
5420     src2 = gen_load_fpr_F(dc, a->rs2);
5421     if (e) {
5422         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5423     } else {
5424         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5425     }
5426     return advance_pc(dc);
5427 }
5428 
5429 TRANS(FCMPs, ALL, do_fcmps, a, false)
5430 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5431 
5432 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5433 {
5434     TCGv_i64 src1, src2;
5435 
5436     if (avail_32(dc) && a->cc != 0) {
5437         return false;
5438     }
5439     if (gen_trap_ifnofpu(dc)) {
5440         return true;
5441     }
5442 
5443     src1 = gen_load_fpr_D(dc, a->rs1);
5444     src2 = gen_load_fpr_D(dc, a->rs2);
5445     if (e) {
5446         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5447     } else {
5448         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5449     }
5450     return advance_pc(dc);
5451 }
5452 
5453 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5454 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5455 
/*
 * Translate FCMPq/FCMPEq: compare two float128 registers into
 * cpu_fcc[a->cc]; the 'e' variant uses the signalling compare helper.
 */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    /* Pre-v9 cpus only have %fcc0.  */
    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5482 
5483 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5484 {
5485     TCGv_i32 src1, src2;
5486 
5487     if (!avail_VIS3(dc)) {
5488         return false;
5489     }
5490     if (gen_trap_ifnofpu(dc)) {
5491         return true;
5492     }
5493 
5494     src1 = gen_load_fpr_F(dc, a->rs1);
5495     src2 = gen_load_fpr_F(dc, a->rs2);
5496     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5497     return advance_pc(dc);
5498 }
5499 
5500 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5501 {
5502     TCGv_i64 src1, src2;
5503 
5504     if (!avail_VIS3(dc)) {
5505         return false;
5506     }
5507     if (gen_trap_ifnofpu(dc)) {
5508         return true;
5509     }
5510 
5511     src1 = gen_load_fpr_D(dc, a->rs1);
5512     src2 = gen_load_fpr_D(dc, a->rs2);
5513     gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
5514     return advance_pc(dc);
5515 }
5516 
5517 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5518                       int (*offset)(unsigned int),
5519                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5520 {
5521     TCGv dst;
5522 
5523     if (gen_trap_ifnofpu(dc)) {
5524         return true;
5525     }
5526     dst = gen_dest_gpr(dc, a->rd);
5527     load(dst, tcg_env, offset(a->rs));
5528     gen_store_gpr(dc, a->rd, dst);
5529     return advance_pc(dc);
5530 }
5531 
5532 TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
5533 TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
5534 TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5535 
5536 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5537                       int (*offset)(unsigned int),
5538                       void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5539 {
5540     TCGv src;
5541 
5542     if (gen_trap_ifnofpu(dc)) {
5543         return true;
5544     }
5545     src = gen_load_gpr(dc, a->rs);
5546     store(src, tcg_env, offset(a->rd));
5547     return advance_pc(dc);
5548 }
5549 
5550 TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
5551 TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5552 
/* Translator hook: unpack the TB flags/cs_base into the DisasContext.  */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next PC is passed through cs_base (delay-slot architecture).  */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5581 
/* Translator hook: nothing to emit at the start of a TB.  */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5585 
/*
 * Translator hook: record (pc, npc) for this insn so the state can be
 * restored on an exception.  A non-zero low two bits in npc mark a
 * non-static value (JUMP_PC / DYNAMIC_PC*), which must be canonicalized
 * before being recorded.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Fold the taken target into the marker; the not-taken
               target is implicitly pc + 4 (asserted here).  */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5607 
/* Translator hook: fetch, decode and translate one 4-byte insn.  */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    /* An undecodable insn raises an illegal-instruction trap.  */
    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* Stop the TB once pc no longer advances in lock step (branch taken).  */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5627 
/*
 * Translator hook: close the TB.  Emit the exit sequence appropriate to
 * how static/dynamic the final (pc, npc) pair is, then emit any delayed
 * exception paths queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is dynamic (low bits set).  Store the
         * known parts and decide whether a TB lookup is still possible.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line code for each queued delayed exception.  */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Only store npc when it is static (low bits clear).  */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5709 
/* Hook table for the generic translator loop.  */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5717 
/* Entry point: translate one TB by running the generic translator loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5725 
/*
 * One-time initialization: create the TCG globals that mirror the
 * CPUSPARCState fields used by the translator.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: %fprs and the fp condition codes.  */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: pc/npc, condition-code pieces, etc.  */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is always zero and has no backing storage.  */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers %o/%l/%i live behind the regwptr indirection.  */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5791 
/*
 * Restore pc/npc from the values recorded by sparc_tr_insn_start:
 * data[0] is pc, data[1] is the (possibly encoded) npc.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5814