xref: /openbmc/qemu/target/sparc/translate.c (revision 56f2ef9c7958320d574448f555cc3a82e500c485)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
93 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
94 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
95 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
96 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
97 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
98 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
99 # define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
100 # define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
101 # define MAXTL_MASK                             0
102 #endif
103 
104 /* Dynamic PC, must exit to main loop. */
105 #define DYNAMIC_PC         1
106 /* Dynamic PC, one of two values according to jump_pc[T2]. */
107 #define JUMP_PC            2
108 /* Dynamic PC, may lookup next TB. */
109 #define DYNAMIC_PC_LOOKUP  3
110 
111 #define DISAS_EXIT  DISAS_TARGET_0
112 
113 /* global register indexes */
114 static TCGv_ptr cpu_regwptr;
115 static TCGv cpu_pc, cpu_npc;
116 static TCGv cpu_regs[32];
117 static TCGv cpu_y;
118 static TCGv cpu_tbr;
119 static TCGv cpu_cond;
120 static TCGv cpu_cc_N;
121 static TCGv cpu_cc_V;
122 static TCGv cpu_icc_Z;
123 static TCGv cpu_icc_C;
124 #ifdef TARGET_SPARC64
125 static TCGv cpu_xcc_Z;
126 static TCGv cpu_xcc_C;
127 static TCGv_i32 cpu_fprs;
128 static TCGv cpu_gsr;
129 #else
130 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
131 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
132 #endif
133 
134 #ifdef TARGET_SPARC64
135 #define cpu_cc_Z  cpu_xcc_Z
136 #define cpu_cc_C  cpu_xcc_C
137 #else
138 #define cpu_cc_Z  cpu_icc_Z
139 #define cpu_cc_C  cpu_icc_C
140 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
141 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
142 #endif
143 
144 /* Floating point comparison registers */
145 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
146 
147 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
148 #ifdef TARGET_SPARC64
149 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
150 # define env64_field_offsetof(X)  env_field_offsetof(X)
151 #else
152 # define env32_field_offsetof(X)  env_field_offsetof(X)
153 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
154 #endif
155 
156 typedef struct DisasCompare {
157     TCGCond cond;
158     TCGv c1;
159     int c2;
160 } DisasCompare;
161 
162 typedef struct DisasDelayException {
163     struct DisasDelayException *next;
164     TCGLabel *lab;
165     TCGv_i32 excp;
166     /* Saved state at parent insn. */
167     target_ulong pc;
168     target_ulong npc;
169 } DisasDelayException;
170 
171 typedef struct DisasContext {
172     DisasContextBase base;
173     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
174     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
175 
176     /* Used when JUMP_PC value is used. */
177     DisasCompare jump;
178     target_ulong jump_pc[2];
179 
180     int mem_idx;
181     bool cpu_cond_live;
182     bool fpu_enabled;
183     bool address_mask_32bit;
184 #ifndef CONFIG_USER_ONLY
185     bool supervisor;
186 #ifdef TARGET_SPARC64
187     bool hypervisor;
188 #endif
189 #endif
190 
191     sparc_def_t *def;
192 #ifdef TARGET_SPARC64
193     int fprs_dirty;
194     int asi;
195 #endif
196     DisasDelayException *delay_excp_list;
197 } DisasContext;
198 
199 // This function uses non-native bit order
200 #define GET_FIELD(X, FROM, TO)                                  \
201     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
202 
203 // This function uses the order in the manuals, i.e. bit 0 is 2^0
204 #define GET_FIELD_SP(X, FROM, TO)               \
205     GET_FIELD(X, 31 - (TO), 31 - (FROM))
206 
207 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
208 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
209 
210 #define UA2005_HTRAP_MASK 0xff
211 #define V8_TRAP_MASK 0x7f
212 
213 #define IS_IMM (insn & (1<<13))
214 
215 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
216 {
217 #if defined(TARGET_SPARC64)
218     int bit = (rd < 32) ? 1 : 2;
219     /* If we know we've already set this bit within the TB,
220        we can avoid setting it again.  */
221     if (!(dc->fprs_dirty & bit)) {
222         dc->fprs_dirty |= bit;
223         tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
224     }
225 #endif
226 }
227 
228 /* floating point registers moves */
229 
230 static int gen_offset_fpr_F(unsigned int reg)
231 {
232     int ret;
233 
234     tcg_debug_assert(reg < 32);
235     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
236     if (reg & 1) {
237         ret += offsetof(CPU_DoubleU, l.lower);
238     } else {
239         ret += offsetof(CPU_DoubleU, l.upper);
240     }
241     return ret;
242 }
243 
244 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
245 {
246     TCGv_i32 ret = tcg_temp_new_i32();
247     tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
248     return ret;
249 }
250 
251 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
252 {
253     tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
254     gen_update_fprs_dirty(dc, dst);
255 }
256 
257 static int gen_offset_fpr_D(unsigned int reg)
258 {
259     tcg_debug_assert(reg < 64);
260     tcg_debug_assert(reg % 2 == 0);
261     return offsetof(CPUSPARCState, fpr[reg / 2]);
262 }
263 
264 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
265 {
266     TCGv_i64 ret = tcg_temp_new_i64();
267     tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
268     return ret;
269 }
270 
271 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
272 {
273     tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
274     gen_update_fprs_dirty(dc, dst);
275 }
276 
277 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
278 {
279     TCGv_i128 ret = tcg_temp_new_i128();
280     TCGv_i64 h = gen_load_fpr_D(dc, src);
281     TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
282 
283     tcg_gen_concat_i64_i128(ret, l, h);
284     return ret;
285 }
286 
287 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
288 {
289     TCGv_i64 h = tcg_temp_new_i64();
290     TCGv_i64 l = tcg_temp_new_i64();
291 
292     tcg_gen_extr_i128_i64(l, h, v);
293     gen_store_fpr_D(dc, dst, h);
294     gen_store_fpr_D(dc, dst + 2, l);
295 }
296 
297 /* moves */
298 #ifdef CONFIG_USER_ONLY
299 #define supervisor(dc) 0
300 #define hypervisor(dc) 0
301 #else
302 #ifdef TARGET_SPARC64
303 #define hypervisor(dc) (dc->hypervisor)
304 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
305 #else
306 #define supervisor(dc) (dc->supervisor)
307 #define hypervisor(dc) 0
308 #endif
309 #endif
310 
311 #if !defined(TARGET_SPARC64)
312 # define AM_CHECK(dc)  false
313 #elif defined(TARGET_ABI32)
314 # define AM_CHECK(dc)  true
315 #elif defined(CONFIG_USER_ONLY)
316 # define AM_CHECK(dc)  false
317 #else
318 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
319 #endif
320 
321 static void gen_address_mask(DisasContext *dc, TCGv addr)
322 {
323     if (AM_CHECK(dc)) {
324         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
325     }
326 }
327 
328 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
329 {
330     return AM_CHECK(dc) ? (uint32_t)addr : addr;
331 }
332 
333 static TCGv gen_load_gpr(DisasContext *dc, int reg)
334 {
335     if (reg > 0) {
336         assert(reg < 32);
337         return cpu_regs[reg];
338     } else {
339         TCGv t = tcg_temp_new();
340         tcg_gen_movi_tl(t, 0);
341         return t;
342     }
343 }
344 
345 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
346 {
347     if (reg > 0) {
348         assert(reg < 32);
349         tcg_gen_mov_tl(cpu_regs[reg], v);
350     }
351 }
352 
353 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
354 {
355     if (reg > 0) {
356         assert(reg < 32);
357         return cpu_regs[reg];
358     } else {
359         return tcg_temp_new();
360     }
361 }
362 
363 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
364 {
365     return translator_use_goto_tb(&s->base, pc) &&
366            translator_use_goto_tb(&s->base, npc);
367 }
368 
369 static void gen_goto_tb(DisasContext *s, int tb_num,
370                         target_ulong pc, target_ulong npc)
371 {
372     if (use_goto_tb(s, pc, npc))  {
373         /* jump to same page: we can use a direct jump */
374         tcg_gen_goto_tb(tb_num);
375         tcg_gen_movi_tl(cpu_pc, pc);
376         tcg_gen_movi_tl(cpu_npc, npc);
377         tcg_gen_exit_tb(s->base.tb, tb_num);
378     } else {
379         /* jump to another page: we can use an indirect jump */
380         tcg_gen_movi_tl(cpu_pc, pc);
381         tcg_gen_movi_tl(cpu_npc, npc);
382         tcg_gen_lookup_and_goto_ptr();
383     }
384 }
385 
386 static TCGv gen_carry32(void)
387 {
388     if (TARGET_LONG_BITS == 64) {
389         TCGv t = tcg_temp_new();
390         tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
391         return t;
392     }
393     return cpu_icc_C;
394 }
395 
396 static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
397 {
398     TCGv z = tcg_constant_tl(0);
399 
400     if (cin) {
401         tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
402         tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
403     } else {
404         tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
405     }
406     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
407     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
408     tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
409     if (TARGET_LONG_BITS == 64) {
410         /*
411          * Carry-in to bit 32 is result ^ src1 ^ src2.
412          * We already have the src xor term in Z, from computation of V.
413          */
414         tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
415         tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
416     }
417     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
418     tcg_gen_mov_tl(dst, cpu_cc_N);
419 }
420 
421 static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
422 {
423     gen_op_addcc_int(dst, src1, src2, NULL);
424 }
425 
426 static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
427 {
428     TCGv t = tcg_temp_new();
429 
430     /* Save the tag bits around modification of dst. */
431     tcg_gen_or_tl(t, src1, src2);
432 
433     gen_op_addcc(dst, src1, src2);
434 
435     /* Incorprate tag bits into icc.V */
436     tcg_gen_andi_tl(t, t, 3);
437     tcg_gen_neg_tl(t, t);
438     tcg_gen_ext32u_tl(t, t);
439     tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
440 }
441 
442 static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
443 {
444     tcg_gen_add_tl(dst, src1, src2);
445     tcg_gen_add_tl(dst, dst, gen_carry32());
446 }
447 
448 static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
449 {
450     gen_op_addcc_int(dst, src1, src2, gen_carry32());
451 }
452 
453 static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
454 {
455     tcg_gen_add_tl(dst, src1, src2);
456     tcg_gen_add_tl(dst, dst, cpu_cc_C);
457 }
458 
459 static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
460 {
461     gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
462 }
463 
464 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
465 {
466     TCGv z = tcg_constant_tl(0);
467 
468     if (cin) {
469         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
470         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
471     } else {
472         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
473     }
474     tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
475     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
476     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
477     tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
478 #ifdef TARGET_SPARC64
479     tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
480     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
481 #endif
482     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
483     tcg_gen_mov_tl(dst, cpu_cc_N);
484 }
485 
486 static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
487 {
488     gen_op_subcc_int(dst, src1, src2, NULL);
489 }
490 
491 static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
492 {
493     TCGv t = tcg_temp_new();
494 
495     /* Save the tag bits around modification of dst. */
496     tcg_gen_or_tl(t, src1, src2);
497 
498     gen_op_subcc(dst, src1, src2);
499 
500     /* Incorprate tag bits into icc.V */
501     tcg_gen_andi_tl(t, t, 3);
502     tcg_gen_neg_tl(t, t);
503     tcg_gen_ext32u_tl(t, t);
504     tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
505 }
506 
507 static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
508 {
509     tcg_gen_sub_tl(dst, src1, src2);
510     tcg_gen_sub_tl(dst, dst, gen_carry32());
511 }
512 
513 static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
514 {
515     gen_op_subcc_int(dst, src1, src2, gen_carry32());
516 }
517 
518 static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
519 {
520     tcg_gen_sub_tl(dst, src1, src2);
521     tcg_gen_sub_tl(dst, dst, cpu_cc_C);
522 }
523 
524 static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
525 {
526     gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
527 }
528 
529 static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
530 {
531     TCGv zero = tcg_constant_tl(0);
532     TCGv one = tcg_constant_tl(1);
533     TCGv t_src1 = tcg_temp_new();
534     TCGv t_src2 = tcg_temp_new();
535     TCGv t0 = tcg_temp_new();
536 
537     tcg_gen_ext32u_tl(t_src1, src1);
538     tcg_gen_ext32u_tl(t_src2, src2);
539 
540     /*
541      * if (!(env->y & 1))
542      *   src2 = 0;
543      */
544     tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);
545 
546     /*
547      * b2 = src1 & 1;
548      * y = (b2 << 31) | (y >> 1);
549      */
550     tcg_gen_extract_tl(t0, cpu_y, 1, 31);
551     tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);
552 
553     // b1 = N ^ V;
554     tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);
555 
556     /*
557      * src1 = (b1 << 31) | (src1 >> 1)
558      */
559     tcg_gen_andi_tl(t0, t0, 1u << 31);
560     tcg_gen_shri_tl(t_src1, t_src1, 1);
561     tcg_gen_or_tl(t_src1, t_src1, t0);
562 
563     gen_op_addcc(dst, t_src1, t_src2);
564 }
565 
566 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
567 {
568 #if TARGET_LONG_BITS == 32
569     if (sign_ext) {
570         tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
571     } else {
572         tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
573     }
574 #else
575     TCGv t0 = tcg_temp_new_i64();
576     TCGv t1 = tcg_temp_new_i64();
577 
578     if (sign_ext) {
579         tcg_gen_ext32s_i64(t0, src1);
580         tcg_gen_ext32s_i64(t1, src2);
581     } else {
582         tcg_gen_ext32u_i64(t0, src1);
583         tcg_gen_ext32u_i64(t1, src2);
584     }
585 
586     tcg_gen_mul_i64(dst, t0, t1);
587     tcg_gen_shri_i64(cpu_y, dst, 32);
588 #endif
589 }
590 
591 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
592 {
593     /* zero-extend truncated operands before multiplication */
594     gen_op_multiply(dst, src1, src2, 0);
595 }
596 
597 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
598 {
599     /* sign-extend truncated operands before multiplication */
600     gen_op_multiply(dst, src1, src2, 1);
601 }
602 
603 static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
604 {
605     TCGv discard = tcg_temp_new();
606     tcg_gen_mulu2_tl(discard, dst, src1, src2);
607 }
608 
609 static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
610                            TCGv_i64 src2, TCGv_i64 src3)
611 {
612     TCGv_i64 t = tcg_temp_new_i64();
613 
614     tcg_gen_mul_i64(t, src1, src2);
615     tcg_gen_add_i64(dst, src3, t);
616 }
617 
618 static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
619                              TCGv_i64 src2, TCGv_i64 src3)
620 {
621     TCGv_i64 l = tcg_temp_new_i64();
622     TCGv_i64 h = tcg_temp_new_i64();
623     TCGv_i64 z = tcg_constant_i64(0);
624 
625     tcg_gen_mulu2_i64(l, h, src1, src2);
626     tcg_gen_add2_i64(l, dst, l, h, src3, z);
627 }
628 
629 static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
630 {
631 #ifdef TARGET_SPARC64
632     gen_helper_sdiv(dst, tcg_env, src1, src2);
633     tcg_gen_ext32s_tl(dst, dst);
634 #else
635     TCGv_i64 t64 = tcg_temp_new_i64();
636     gen_helper_sdiv(t64, tcg_env, src1, src2);
637     tcg_gen_trunc_i64_tl(dst, t64);
638 #endif
639 }
640 
641 static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
642 {
643     TCGv_i64 t64;
644 
645 #ifdef TARGET_SPARC64
646     t64 = cpu_cc_V;
647 #else
648     t64 = tcg_temp_new_i64();
649 #endif
650 
651     gen_helper_udiv(t64, tcg_env, src1, src2);
652 
653 #ifdef TARGET_SPARC64
654     tcg_gen_ext32u_tl(cpu_cc_N, t64);
655     tcg_gen_shri_tl(cpu_cc_V, t64, 32);
656     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
657     tcg_gen_movi_tl(cpu_icc_C, 0);
658 #else
659     tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
660 #endif
661     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
662     tcg_gen_movi_tl(cpu_cc_C, 0);
663     tcg_gen_mov_tl(dst, cpu_cc_N);
664 }
665 
666 static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
667 {
668     TCGv_i64 t64;
669 
670 #ifdef TARGET_SPARC64
671     t64 = cpu_cc_V;
672 #else
673     t64 = tcg_temp_new_i64();
674 #endif
675 
676     gen_helper_sdiv(t64, tcg_env, src1, src2);
677 
678 #ifdef TARGET_SPARC64
679     tcg_gen_ext32s_tl(cpu_cc_N, t64);
680     tcg_gen_shri_tl(cpu_cc_V, t64, 32);
681     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
682     tcg_gen_movi_tl(cpu_icc_C, 0);
683 #else
684     tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
685 #endif
686     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
687     tcg_gen_movi_tl(cpu_cc_C, 0);
688     tcg_gen_mov_tl(dst, cpu_cc_N);
689 }
690 
691 static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
692 {
693     gen_helper_taddcctv(dst, tcg_env, src1, src2);
694 }
695 
696 static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
697 {
698     gen_helper_tsubcctv(dst, tcg_env, src1, src2);
699 }
700 
701 static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
702 {
703     tcg_gen_ctpop_tl(dst, src2);
704 }
705 
706 static void gen_op_lzcnt(TCGv dst, TCGv src)
707 {
708     tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
709 }
710 
711 #ifndef TARGET_SPARC64
712 static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
713 {
714     g_assert_not_reached();
715 }
716 #endif
717 
718 static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
719 {
720     gen_helper_array8(dst, src1, src2);
721     tcg_gen_shli_tl(dst, dst, 1);
722 }
723 
724 static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
725 {
726     gen_helper_array8(dst, src1, src2);
727     tcg_gen_shli_tl(dst, dst, 2);
728 }
729 
730 static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
731 {
732 #ifdef TARGET_SPARC64
733     gen_helper_fpack16(dst, cpu_gsr, src);
734 #else
735     g_assert_not_reached();
736 #endif
737 }
738 
739 static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
740 {
741 #ifdef TARGET_SPARC64
742     gen_helper_fpackfix(dst, cpu_gsr, src);
743 #else
744     g_assert_not_reached();
745 #endif
746 }
747 
748 static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
749 {
750 #ifdef TARGET_SPARC64
751     gen_helper_fpack32(dst, cpu_gsr, src1, src2);
752 #else
753     g_assert_not_reached();
754 #endif
755 }
756 
757 static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
758 {
759     TCGv_i32 t[2];
760 
761     for (int i = 0; i < 2; i++) {
762         TCGv_i32 u = tcg_temp_new_i32();
763         TCGv_i32 v = tcg_temp_new_i32();
764 
765         tcg_gen_sextract_i32(u, src1, i * 16, 16);
766         tcg_gen_sextract_i32(v, src2, i * 16, 16);
767         tcg_gen_add_i32(u, u, v);
768         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
769         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
770         t[i] = u;
771     }
772     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
773 }
774 
775 static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
776 {
777     TCGv_i32 t[2];
778 
779     for (int i = 0; i < 2; i++) {
780         TCGv_i32 u = tcg_temp_new_i32();
781         TCGv_i32 v = tcg_temp_new_i32();
782 
783         tcg_gen_sextract_i32(u, src1, i * 16, 16);
784         tcg_gen_sextract_i32(v, src2, i * 16, 16);
785         tcg_gen_sub_i32(u, u, v);
786         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
787         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
788         t[i] = u;
789     }
790     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
791 }
792 
793 static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
794 {
795     TCGv_i32 r = tcg_temp_new_i32();
796     TCGv_i32 t = tcg_temp_new_i32();
797     TCGv_i32 v = tcg_temp_new_i32();
798     TCGv_i32 z = tcg_constant_i32(0);
799 
800     tcg_gen_add_i32(r, src1, src2);
801     tcg_gen_xor_i32(t, src1, src2);
802     tcg_gen_xor_i32(v, r, src2);
803     tcg_gen_andc_i32(v, v, t);
804 
805     tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
806     tcg_gen_addi_i32(t, t, INT32_MAX);
807 
808     tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
809 }
810 
811 static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
812 {
813     TCGv_i32 r = tcg_temp_new_i32();
814     TCGv_i32 t = tcg_temp_new_i32();
815     TCGv_i32 v = tcg_temp_new_i32();
816     TCGv_i32 z = tcg_constant_i32(0);
817 
818     tcg_gen_sub_i32(r, src1, src2);
819     tcg_gen_xor_i32(t, src1, src2);
820     tcg_gen_xor_i32(v, r, src1);
821     tcg_gen_and_i32(v, v, t);
822 
823     tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
824     tcg_gen_addi_i32(t, t, INT32_MAX);
825 
826     tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
827 }
828 
829 static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
830                                 TCGv_i64 s2, TCGv gsr)
831 {
832 #ifdef TARGET_SPARC64
833     TCGv t1, t2, shift;
834 
835     t1 = tcg_temp_new();
836     t2 = tcg_temp_new();
837     shift = tcg_temp_new();
838 
839     tcg_gen_andi_tl(shift, gsr, 7);
840     tcg_gen_shli_tl(shift, shift, 3);
841     tcg_gen_shl_tl(t1, s1, shift);
842 
843     /*
844      * A shift of 64 does not produce 0 in TCG.  Divide this into a
845      * shift of (up to 63) followed by a constant shift of 1.
846      */
847     tcg_gen_xori_tl(shift, shift, 63);
848     tcg_gen_shr_tl(t2, s2, shift);
849     tcg_gen_shri_tl(t2, t2, 1);
850 
851     tcg_gen_or_tl(dst, t1, t2);
852 #else
853     g_assert_not_reached();
854 #endif
855 }
856 
857 static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
858 {
859     gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
860 }
861 
862 static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
863 {
864 #ifdef TARGET_SPARC64
865     gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
866 #else
867     g_assert_not_reached();
868 #endif
869 }
870 
871 static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
872 {
873 #ifdef TARGET_SPARC64
874     gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
875 #else
876     g_assert_not_reached();
877 #endif
878 }
879 
880 static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
881 {
882     tcg_gen_ext16s_i32(src2, src2);
883     gen_helper_fmul8x16a(dst, src1, src2);
884 }
885 
886 static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
887 {
888     tcg_gen_sari_i32(src2, src2, 16);
889     gen_helper_fmul8x16a(dst, src1, src2);
890 }
891 
892 static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
893 {
894     TCGv_i32 t0 = tcg_temp_new_i32();
895     TCGv_i32 t1 = tcg_temp_new_i32();
896     TCGv_i32 t2 = tcg_temp_new_i32();
897 
898     tcg_gen_ext8u_i32(t0, src1);
899     tcg_gen_ext16s_i32(t1, src2);
900     tcg_gen_mul_i32(t0, t0, t1);
901 
902     tcg_gen_extract_i32(t1, src1, 16, 8);
903     tcg_gen_sextract_i32(t2, src2, 16, 16);
904     tcg_gen_mul_i32(t1, t1, t2);
905 
906     tcg_gen_concat_i32_i64(dst, t0, t1);
907 }
908 
909 static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
910 {
911     TCGv_i32 t0 = tcg_temp_new_i32();
912     TCGv_i32 t1 = tcg_temp_new_i32();
913     TCGv_i32 t2 = tcg_temp_new_i32();
914 
915     /*
916      * The insn description talks about extracting the upper 8 bits
917      * of the signed 16-bit input rs1, performing the multiply, then
918      * shifting left by 8 bits.  Instead, zap the lower 8 bits of
919      * the rs1 input, which avoids the need for two shifts.
920      */
921     tcg_gen_ext16s_i32(t0, src1);
922     tcg_gen_andi_i32(t0, t0, ~0xff);
923     tcg_gen_ext16s_i32(t1, src2);
924     tcg_gen_mul_i32(t0, t0, t1);
925 
926     tcg_gen_sextract_i32(t1, src1, 16, 16);
927     tcg_gen_andi_i32(t1, t1, ~0xff);
928     tcg_gen_sextract_i32(t2, src2, 16, 16);
929     tcg_gen_mul_i32(t1, t1, t2);
930 
931     tcg_gen_concat_i32_i64(dst, t0, t1);
932 }
933 
934 #ifdef TARGET_SPARC64
935 static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
936                              TCGv_vec src1, TCGv_vec src2)
937 {
938     TCGv_vec a = tcg_temp_new_vec_matching(dst);
939     TCGv_vec c = tcg_temp_new_vec_matching(dst);
940 
941     tcg_gen_add_vec(vece, a, src1, src2);
942     tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
943     /* Vector cmp produces -1 for true, so subtract to add carry. */
944     tcg_gen_sub_vec(vece, dst, a, c);
945 }
946 
947 static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
948                             uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
949 {
950     static const TCGOpcode vecop_list[] = {
951         INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
952     };
953     static const GVecGen3 op = {
954         .fni8 = gen_helper_fchksm16,
955         .fniv = gen_vec_fchksm16,
956         .opt_opc = vecop_list,
957         .vece = MO_16,
958     };
959     tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
960 }
961 
/*
 * Per-element rounding average: dst = (src1 + src2 + 1) >> 1, computed
 * as (src1 >> 1) + (src2 >> 1) + ((src1 | src2) & 1) to avoid overflow
 * of the intermediate sum.  NB: clobbers src1 and src2.
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    /* Rounding term: 1 iff either input is odd. */
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
974 
/*
 * Expand FMEAN16 as a gvec operation: vector expansion when the host
 * supports add/sari vector ops, 64-bit helper fallback otherwise.
 */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    /* Host vector ops required by gen_vec_fmean16. */
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
989 #else
990 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
991 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
992 #endif
993 
994 static void finishing_insn(DisasContext *dc)
995 {
996     /*
997      * From here, there is no future path through an unwinding exception.
998      * If the current insn cannot raise an exception, the computation of
999      * cpu_cond may be able to be elided.
1000      */
1001     if (dc->cpu_cond_live) {
1002         tcg_gen_discard_tl(cpu_cond);
1003         dc->cpu_cond_live = false;
1004     }
1005 }
1006 
1007 static void gen_generic_branch(DisasContext *dc)
1008 {
1009     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
1010     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
1011     TCGv c2 = tcg_constant_tl(dc->jump.c2);
1012 
1013     tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
1014 }
1015 
1016 /* call this function before using the condition register as it may
1017    have been set for a jump */
1018 static void flush_cond(DisasContext *dc)
1019 {
1020     if (dc->npc == JUMP_PC) {
1021         gen_generic_branch(dc);
1022         dc->npc = DYNAMIC_PC_LOOKUP;
1023     }
1024 }
1025 
/*
 * Ensure cpu_npc holds the correct next-pc value.  dc->npc values with
 * the low two bits set are the symbolic markers (JUMP_PC, DYNAMIC_PC,
 * DYNAMIC_PC_LOOKUP) rather than real addresses.
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Pending conditional branch: resolve it into cpu_npc now. */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the runtime value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Static next-pc: store the known constant. */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1044 
/* Write the current pc and npc back to the CPU state (e.g. before a
   helper call that may raise an exception). */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1050 
/* Raise exception WHICH at the current insn and end the translation block. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1058 
1059 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1060 {
1061     DisasDelayException *e = g_new0(DisasDelayException, 1);
1062 
1063     e->next = dc->delay_excp_list;
1064     dc->delay_excp_list = e;
1065 
1066     e->lab = gen_new_label();
1067     e->excp = excp;
1068     e->pc = dc->pc;
1069     /* Caller must have used flush_cond before branch. */
1070     assert(e->npc != JUMP_PC);
1071     e->npc = dc->npc;
1072 
1073     return e->lab;
1074 }
1075 
/* Convenience wrapper for delay_exceptionv with a constant trap type. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1080 
/*
 * Emit a runtime alignment check: branch to a deferred TT_UNALIGNED
 * exception if any bit of MASK is set in ADDR.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    /* delay_exception requires a resolved npc. */
    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
1092 
/*
 * Advance pc to npc (used when the current insn transfers control).
 * Handles both static npc values and the symbolic dynamic markers.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending branch, then copy it into cpu_pc. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Static case: just track the constant in the DisasContext. */
        dc->pc = dc->npc;
    }
}
1116 
/*
 * Decode integer condition COND (4 bits) against the icc (xcc=false)
 * or xcc (xcc=true) flags into a DisasCompare.  The low three bits
 * select the base condition; bit 3 selects its negation.  The flags
 * live in cpu_cc_{N,V,C,Z} (and cpu_icc_{C,Z} for 32-bit cc on a
 * 64-bit target).
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        /* Replicate the sign bit of N ^ V across t1 (0 or -1). */
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* 32-bit carry is kept in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        /* Bit 3 of the condition encoding negates the sense. */
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1215 
/*
 * Decode floating-point condition COND against %fcc[CC] into a
 * DisasCompare.  Bit 3 of COND negates the sense.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both values with bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    /* Widen the 32-bit fcc value to a target-long comparison operand. */
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1275 
1276 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1277 {
1278     static const TCGCond cond_reg[4] = {
1279         TCG_COND_NEVER,  /* reserved */
1280         TCG_COND_EQ,
1281         TCG_COND_LE,
1282         TCG_COND_LT,
1283     };
1284     TCGCond tcond;
1285 
1286     if ((cond & 3) == 0) {
1287         return false;
1288     }
1289     tcond = cond_reg[cond & 3];
1290     if (cond & 4) {
1291         tcond = tcg_invert_cond(tcond);
1292     }
1293 
1294     cmp->cond = tcond;
1295     cmp->c1 = tcg_temp_new();
1296     cmp->c2 = 0;
1297     tcg_gen_mov_tl(cmp->c1, r_src);
1298     return true;
1299 }
1300 
/* Clear the accrued-current-exception and trap-type fields of the FSR. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

/* Single-precision register move; cannot raise IEEE exceptions. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

/* Single-precision negate: flip the sign bit only. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

/* Single-precision absolute value: clear the sign bit only. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

/* Double-precision register move; cannot raise IEEE exceptions. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

/* Double-precision negate: flip the sign bit only. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

/* Double-precision absolute value: clear the sign bit only. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1342 
1343 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1344 {
1345     TCGv_i64 l = tcg_temp_new_i64();
1346     TCGv_i64 h = tcg_temp_new_i64();
1347 
1348     tcg_gen_extr_i128_i64(l, h, src);
1349     tcg_gen_xori_i64(h, h, 1ull << 63);
1350     tcg_gen_concat_i64_i128(dst, l, h);
1351 }
1352 
1353 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1354 {
1355     TCGv_i64 l = tcg_temp_new_i64();
1356     TCGv_i64 h = tcg_temp_new_i64();
1357 
1358     tcg_gen_extr_i128_i64(l, h, src);
1359     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1360     tcg_gen_concat_i64_i128(dst, l, h);
1361 }
1362 
/* d = s1 * s2 + s3, fused (one rounding). */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

/* d = s1 * s2 + s3, fused (one rounding). */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

/* d = s1 * s2 - s3: negate the addend via the softfloat muladd flag. */
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* d = s1 * s2 - s3: negate the addend via the softfloat muladd flag. */
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* d = -(s1 * s2 - s3): negate both the addend and the result. */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* d = -(s1 * s2 - s3): negate both the addend and the result. */
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* d = -(s1 * s2 + s3): negate the result. */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* d = -(s1 * s2 + s3): negate the result. */
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1408 
/*
 * Use muladd to compute ((1 * src1) + src2) / 2 with one rounding,
 * via the softfloat halve-result flag.
 */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1453 
/* Raise an FP exception with trap type FTT at the current insn. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1465 
/*
 * Raise TT_NFPU_INSN and return nonzero if the FPU is disabled.
 * In user-only builds the FPU is always considered enabled.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1476 
1477 /* asi moves */
/* asi moves */
typedef enum {
    GET_ASI_HELPER,  /* generic case: call the ld/st_asi helpers */
    GET_ASI_EXCP,    /* an exception was already generated; emit nothing */
    GET_ASI_DIRECT,  /* plain guest load/store with a chosen mem_idx */
    GET_ASI_DTWINX,  /* 128-bit twin load/store asis */
    GET_ASI_CODE,    /* instruction-space access (pre-v9, sysemu only) */
    GET_ASI_BLOCK,   /* 64-byte block transfer asis */
    GET_ASI_SHORT,   /* 8/16-bit "FL" partial-register asis */
    GET_ASI_BCOPY,   /* ASI_M_BCOPY: 32-byte block copy */
    GET_ASI_BFILL,   /* ASI_M_BFILL: 32-byte block fill */
} ASIType;

/* Resolved ASI description used by the gen_*_asi expanders below. */
typedef struct {
    ASIType type;    /* dispatch category, see ASIType */
    int asi;         /* raw ASI number (passed to helpers) */
    int mem_idx;     /* MMU index to use for direct accesses */
    MemOp memop;     /* size/endianness of the access */
} DisasASI;
1496 
1497 /*
1498  * Build DisasASI.
1499  * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1501  */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }
        /* Unlisted asis fall through with type == GET_ASI_HELPER. */

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* Negative asi means "use the %asi register" (v9). */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: choose the MMU index from the address space. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second switch: choose the access category. */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1728 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * 32-bit user-only builds have no ld/st_asi helpers; provide stubs so
 * the references below compile.  All paths that would reach them abort.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1742 
/* Emit an integer load through ASI descriptor DA into DST. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised while resolving the ASI. */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* Generic case: the helper dispatches on the raw ASI number. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may fault; make pc/npc visible first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1788 
/* Emit an integer store of SRC through ASI descriptor DA. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised while resolving the ASI. */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte line. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            /* Two 16-byte transfers cover the 32-byte line. */
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* Generic case: the helper dispatches on the raw ASI number. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may fault; make pc/npc visible first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1867 
/* Emit SWAP through ASI descriptor DA: atomically exchange SRC with
   the memory word at ADDR, old value into DST. */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1884 
/* Emit CASA/CASXA through ASI descriptor DA: atomic compare-and-swap
   at ADDR; previous memory value lands in OLDV. */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1901 
/* Emit LDSTUB through ASI descriptor DA: atomically read the byte at
   ADDR into DST and set the memory byte to 0xff. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Non-atomic fallback below is unsafe when running in
               parallel; retry the insn under exclusive execution. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may fault; make pc/npc visible first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1936 
/*
 * Generate code for LDF(A)/LDDF(A)/LDQF(A): load ORIG_SIZE bytes from
 * ADDR into floating-point register RD, honoring the resolved ASI DA.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Split the quad access into two 64-bit accesses.  */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        /* All fp loads here require only 4-byte alignment.  */
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Quad load as two 64-bit halves at ADDR and ADDR+8.  */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* 64-byte block load: eight 64-bit loads into rd..rd+14.
               The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2057 
2058 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2059                         TCGv addr, int rd)
2060 {
2061     MemOp memop = da->memop;
2062     MemOp size = memop & MO_SIZE;
2063     TCGv_i32 d32;
2064     TCGv_i64 d64;
2065     TCGv addr_tmp;
2066 
2067     /* TODO: Use 128-bit load/store below. */
2068     if (size == MO_128) {
2069         memop = (memop & ~MO_SIZE) | MO_64;
2070     }
2071 
2072     switch (da->type) {
2073     case GET_ASI_EXCP:
2074         break;
2075 
2076     case GET_ASI_DIRECT:
2077         memop |= MO_ALIGN_4;
2078         switch (size) {
2079         case MO_32:
2080             d32 = gen_load_fpr_F(dc, rd);
2081             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2082             break;
2083         case MO_64:
2084             d64 = gen_load_fpr_D(dc, rd);
2085             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2086             break;
2087         case MO_128:
2088             /* Only 4-byte alignment required.  However, it is legal for the
2089                cpu to signal the alignment fault, and the OS trap handler is
2090                required to fix it up.  Requiring 16-byte alignment here avoids
2091                having to probe the second page before performing the first
2092                write.  */
2093             d64 = gen_load_fpr_D(dc, rd);
2094             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2095             addr_tmp = tcg_temp_new();
2096             tcg_gen_addi_tl(addr_tmp, addr, 8);
2097             d64 = gen_load_fpr_D(dc, rd + 2);
2098             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2099             break;
2100         default:
2101             g_assert_not_reached();
2102         }
2103         break;
2104 
2105     case GET_ASI_BLOCK:
2106         /* Valid for stdfa on aligned registers only.  */
2107         if (orig_size == MO_64 && (rd & 7) == 0) {
2108             /* The first operation checks required alignment.  */
2109             addr_tmp = tcg_temp_new();
2110             for (int i = 0; ; ++i) {
2111                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2112                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2113                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2114                 if (i == 7) {
2115                     break;
2116                 }
2117                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2118                 addr = addr_tmp;
2119             }
2120         } else {
2121             gen_exception(dc, TT_ILL_INSN);
2122         }
2123         break;
2124 
2125     case GET_ASI_SHORT:
2126         /* Valid for stdfa only.  */
2127         if (orig_size == MO_64) {
2128             d64 = gen_load_fpr_D(dc, rd);
2129             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2130         } else {
2131             gen_exception(dc, TT_ILL_INSN);
2132         }
2133         break;
2134 
2135     default:
2136         /* According to the table in the UA2011 manual, the only
2137            other asis that are valid for ldfa/lddfa/ldqfa are
2138            the PST* asis, which aren't currently handled.  */
2139         gen_exception(dc, TT_ILL_INSN);
2140         break;
2141     }
2142 }
2143 
/*
 * Generate code for LDD(A)/LDTWA: load 64 bits (or 128 for DTWINX) from
 * ADDR into the even/odd GPR pair rd/rd+1, honoring the resolved ASI DA.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; skip writeback.  */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Instruction-space access goes through a dedicated helper.  */
            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2239 
/*
 * Generate code for STD(A)/STTWA: store the even/odd GPR pair rd/rd+1
 * (64 bits, or 128 for DTWINX) to ADDR, honoring the resolved ASI DA.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing.  */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to 16 bytes, then store the
               32-byte-aligned block as two 16-byte stores.  */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2330 
/* FMOVS<cond>: conditionally move single-precision F[rs] to F[rd],
   per the comparison in CMP.  sparc64 only.  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? F[rs] : F[rd] (i.e. unchanged).  */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2356 
2357 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2358 {
2359 #ifdef TARGET_SPARC64
2360     TCGv_i64 dst = tcg_temp_new_i64();
2361     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2362                         gen_load_fpr_D(dc, rs),
2363                         gen_load_fpr_D(dc, rd));
2364     gen_store_fpr_D(dc, rd, dst);
2365 #else
2366     qemu_build_not_reached();
2367 #endif
2368 }
2369 
2370 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2371 {
2372 #ifdef TARGET_SPARC64
2373     TCGv c2 = tcg_constant_tl(cmp->c2);
2374     TCGv_i64 h = tcg_temp_new_i64();
2375     TCGv_i64 l = tcg_temp_new_i64();
2376 
2377     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2378                         gen_load_fpr_D(dc, rs),
2379                         gen_load_fpr_D(dc, rd));
2380     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2381                         gen_load_fpr_D(dc, rs + 2),
2382                         gen_load_fpr_D(dc, rd + 2));
2383     gen_store_fpr_D(dc, rd, h);
2384     gen_store_fpr_D(dc, rd + 2, l);
2385 #else
2386     qemu_build_not_reached();
2387 #endif
2388 }
2389 
2390 #ifdef TARGET_SPARC64
/* Compute a pointer to the trap_state entry for the current trap level:
   r_tsptr = &env->ts[env->tl & MAXTL_MASK].  */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
2412 #endif
2413 
2414 static int extract_dfpreg(DisasContext *dc, int x)
2415 {
2416     int r = x & 0x1e;
2417 #ifdef TARGET_SPARC64
2418     r |= (x & 1) << 5;
2419 #endif
2420     return r;
2421 }
2422 
2423 static int extract_qfpreg(DisasContext *dc, int x)
2424 {
2425     int r = x & 0x1c;
2426 #ifdef TARGET_SPARC64
2427     r |= (x & 1) << 5;
2428 #endif
2429     return r;
2430 }
2431 
2432 /* Include the auto-generated decoder.  */
2433 #include "decode-insns.c.inc"
2434 
/*
 * Define trans_<NAME> as a thin wrapper: the insn decodes as legal only
 * when the avail_<AVAIL> predicate holds for this cpu, after which FUNC
 * performs the actual translation.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates: resolved at compile time for 32- vs 64-bit
 * builds, and from the cpu feature bits otherwise.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
# define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_IMA(C)     false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
# define avail_VIS4(C)    false
#endif
2475 
2476 /* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    /* Low bits of npc are used to encode the special values
       DYNAMIC_PC/DYNAMIC_PC_LOOKUP/JUMP_PC rather than an address.  */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime: advance in the cpu state.  */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are static: advance at translation time.  */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2516 
2517 /*
2518  * Major opcodes 00 and 01 -- branches, call, and sethi
2519  */
2520 
/*
 * Finish translation of a conditional branch to PC + DISP*4, with
 * delay-slot annulment per ANNUL.  Always returns true.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    /* Branch-always: annul skips the delay slot entirely.  */
    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    /* Branch-never: annul skips the delay slot; otherwise fall through.  */
    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc is a special (runtime) value; advance in cpu state.  */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional: the delay slot executes only when the
           branch is taken, so split into two exits now.  */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Runtime npc: select the new npc with a movcond.  */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the branch decision past the delay slot (JUMP_PC).  */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2600 
/* Raise a privileged-instruction trap; returns true for use as the
   result of a trans_* function.  */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2606 
/* Raise an fp exception with ftt = unimplemented-FPop; returns true for
   use as the result of a trans_* function.  */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2612 
2613 static bool gen_trap_float128(DisasContext *dc)
2614 {
2615     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2616         return false;
2617     }
2618     return raise_unimpfpop(dc);
2619 }
2620 
/* Conditional branch on integer condition codes (Bicc and BPcc).  */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2631 
/* Conditional branch on floating-point condition codes (FBfcc/FBPfcc).
   Traps first if the FPU is disabled.  */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2645 
2646 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2647 {
2648     DisasCompare cmp;
2649 
2650     if (!avail_64(dc)) {
2651         return false;
2652     }
2653     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2654         return false;
2655     }
2656     return advance_jump_cond(dc, &cmp, a->a, a->i);
2657 }
2658 
2659 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2660 {
2661     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2662 
2663     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2664     gen_mov_pc_npc(dc);
2665     dc->npc = target;
2666     return true;
2667 }
2668 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false makes the decoder raise illegal instruction.  */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2682 
2683 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2684 {
2685     /* Special-case %g0 because that's the canonical nop.  */
2686     if (a->rd) {
2687         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2688     }
2689     return advance_pc(dc);
2690 }
2691 
2692 /*
2693  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2694  */
2695 
/*
 * Common translation for Tcc (trap on condition): raise trap number
 * ((rs1 + rs2_or_imm) & mask) + TT_TRAP when the condition holds.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode use the wider mask.  */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* trap = ((rs1 + rs2/imm) & mask) + TT_TRAP, computed at runtime. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2748 
/* Tcc with register operand; the cc field is a v9 extension.  */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}
2756 
/* Tcc with immediate, pre-v9 encoding (no cc field); 32-bit only.  */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}
2764 
/* Tcc with immediate, v9 encoding (with cc field); 64-bit only.  */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2772 
/* STBAR: store/store memory barrier.  */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2778 
/* MEMBAR: v9 memory barrier with ordering (mmask) and completion
   (cmask) constraint fields.  */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2794 
2795 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2796                           TCGv (*func)(DisasContext *, TCGv))
2797 {
2798     if (!priv) {
2799         return raise_priv(dc);
2800     }
2801     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2802     return advance_pc(dc);
2803 }
2804 
/* RDY: the Y register is kept live in cpu_y, so no copy is needed.  */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}
2809 
static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        /* On v9, rs1 != 0 selects a different ASR; let those match.  */
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2822 
/* Read the Leon3 configuration register (%asr17) via helper.  */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2830 
/* Read the v9 condition-codes register (%ccr) via helper.  */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2838 
/* Read %asi: the current ASI is a translation-time constant in dc.  */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2849 
/* Read the %tick counter via helper; being an I/O access, this may
   force the TB to end.  */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2865 
2866 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2867 {
2868     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2869 }
2870 
2871 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2872 
2873 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2874 {
2875     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2876     return dst;
2877 }
2878 
2879 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2880 
/* Read %gsr; traps first if the FPU is disabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* Read %softint from env (sign-extended 32-bit field). */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* Read %tick_cmpr directly from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* Read the %stick counter; same I/O handling as do_rdtick. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* Read %stick_cmpr directly from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2930 
2931 /*
2932  * UltraSPARC-T1 Strand status.
2933  * HYPV check maybe not enough, UA2005 & UA2007 describe
2934  * this ASR as impl. dep
2935  */
2936 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2937 {
2938     return tcg_constant_tl(1);
2939 }
2940 
2941 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2942 
2943 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2944 {
2945     gen_helper_rdpsr(dst, tcg_env);
2946     return dst;
2947 }
2948 
2949 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2950 
/* Read the hyperprivileged %hpstate register from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* Read %htstate[TL]: index the htstate array by the current trap level. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = env + (env->tl & MAXTL_MASK) * 8, i.e. &htstate[tl]. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* Read %hintp from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* Read %htba (hypervisor trap base address) from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* Read %hver from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* Read %hstick_cmpr from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3008 
/* Read %wim (sparc32 window invalid mask) from env. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* Read %tpc from the trap state for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* Read %tnpc from the trap state for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* Read %tstate from the trap state for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3061 
/* Read %tt (trap type) from the trap state for the current trap level. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

/* Read %tbr / %tba: shared TCG global for both sparc32 and v9. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* Read %pstate from env. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* Read %tl (trap level) from env. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* Read %pil from env. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3109 
/* Read %cwp (current window pointer) via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* Read %cansave from env. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* Read %canrestore from env. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* Read %cleanwin from env. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* Read %otherwin from env. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* Read %wstate from env. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* Read %gl (global level, UA2005) from env. */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* Read %ver from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3183 
3184 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3185 {
3186     if (avail_64(dc)) {
3187         gen_helper_flushw(tcg_env);
3188         return advance_pc(dc);
3189     }
3190     return false;
3191 }
3192 
3193 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3194                           void (*func)(DisasContext *, TCGv))
3195 {
3196     TCGv src;
3197 
3198     /* For simplicity, we under-decoded the rs2 form. */
3199     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3200         return false;
3201     }
3202     if (!priv) {
3203         return raise_priv(dc);
3204     }
3205 
3206     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3207         src = tcg_constant_tl(a->rs2_or_imm);
3208     } else {
3209         TCGv src1 = gen_load_gpr(dc, a->rs1);
3210         if (a->rs2_or_imm == 0) {
3211             src = src1;
3212         } else {
3213             src = tcg_temp_new();
3214             if (a->imm) {
3215                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3216             } else {
3217                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3218             }
3219         }
3220     }
3221     func(dc, src);
3222     return advance_pc(dc);
3223 }
3224 
/* Write %y: only the low 32 bits are kept. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* Write %ccr via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* Write %asi: only 8 bits are significant. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* Write %fprs; the TB tracks fprs_dirty, so it must end here. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* Write %gsr; traps first if the FPU is disabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* Set bits in %softint via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* Clear bits in %softint via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* Overwrite %softint via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3292 
/* Write %tick_cmpr and reprogram the tick timer limit. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3306 
3307 static void do_wrstick(DisasContext *dc, TCGv src)
3308 {
3309 #ifdef TARGET_SPARC64
3310     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3311 
3312     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3313     translator_io_start(&dc->base);
3314     gen_helper_tick_set_count(r_tickptr, src);
3315     /* End TB to handle timer interrupt */
3316     dc->base.is_jmp = DISAS_EXIT;
3317 #else
3318     qemu_build_not_reached();
3319 #endif
3320 }
3321 
3322 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3323 
/* Write %stick_cmpr and reprogram the stick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

/* Leon3 power-down: state must be saved before the helper halts the CPU. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/* Write %psr (sparc32) via helper; flags/state change, so end the TB. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* Write %wim, masked to the implemented number of windows. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3365 
/* Write %tpc into the trap state for the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* Write %tnpc into the trap state for the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* Write %tstate into the trap state for the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* Write %tt into the trap state for the current trap level. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

/* Write the %tick counter. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick))
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3434 
/* Write %tba (shared global with sparc32 %tbr). */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

/* Write %pstate via helper; npc becomes dynamic as state may change. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* Write %tl (trap level); npc becomes dynamic as state may change. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* Write %pil via helper; may unmask pending interrupts. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* Write %cwp via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3479 
/* Write %cansave to env. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* Write %canrestore to env. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* Write %cleanwin to env. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* Write %otherwin to env. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* Write %wstate to env. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* Write %gl (UA2005 global level) via helper. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3531 
/* Write %hpstate; end the TB as execution state may change. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/* Write %htstate[TL]: index the htstate array by the current trap level. */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = env + (env->tl & MAXTL_MASK) * 8, i.e. &htstate[tl]. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* Write %hintp to env. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* Write %htba to env. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* Write %hstick_cmpr and reprogram the hstick timer limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3584 
3585 static bool do_saved_restored(DisasContext *dc, bool saved)
3586 {
3587     if (!supervisor(dc)) {
3588         return raise_priv(dc);
3589     }
3590     if (saved) {
3591         gen_helper_saved(tcg_env);
3592     } else {
3593         gen_helper_restored(tcg_env);
3594     }
3595     return advance_pc(dc);
3596 }
3597 
3598 TRANS(SAVED, 64, do_saved_restored, true)
3599 TRANS(RESTORED, 64, do_saved_restored, false)
3600 
/* NOP: nothing to emit, just advance the PC. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3612 
/*
 * Common helper for the reg-reg-reg/imm arithmetic format.
 * FUNC is the register form, FUNCI the (optional) immediate form.
 * With LOGIC_CC, the result is also used to set the condition codes
 * as a logical operation does: N = result, Z mirrors N, C = V = 0.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute directly into cpu_cc_N; the result doubles as N. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        /* Logical cc: clear carry and overflow, Z tracks the result. */
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3655 
3656 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3657                      void (*func)(TCGv, TCGv, TCGv),
3658                      void (*funci)(TCGv, TCGv, target_long),
3659                      void (*func_cc)(TCGv, TCGv, TCGv))
3660 {
3661     if (a->cc) {
3662         return do_arith_int(dc, a, func_cc, NULL, false);
3663     }
3664     return do_arith_int(dc, a, func, funci, false);
3665 }
3666 
/* Dispatch a logical insn; a->cc selects logical condition-code setting. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3700 
3701 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3702 {
3703     /* OR with %g0 is the canonical alias for MOV. */
3704     if (!a->cc && a->rs1 == 0) {
3705         if (a->imm || a->rs2_or_imm == 0) {
3706             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3707         } else if (a->rs2_or_imm & ~0x1f) {
3708             /* For simplicity, we under-decoded the rs2 form. */
3709             return false;
3710         } else {
3711             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3712         }
3713         return advance_pc(dc);
3714     }
3715     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3716 }
3717 
/*
 * UDIV: unsigned 64/32 division of (%y:rs1) by rs2, with the quotient
 * saturated to UINT32_MAX on overflow.  A zero divisor traps.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        /* Register divisor: raise the trap at runtime when it is zero. */
        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Dividend is rs1 in the low half, %y in the high half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate the quotient to 32 bits. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3770 
/* UDIVX: unsigned 64-bit division; a zero divisor traps. */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        /* Register divisor: raise the trap at runtime when it is zero. */
        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3808 
/* SDIVX: signed 64-bit division; a zero divisor traps. */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* Division by -1 is negation; avoids INT64_MIN / -1 overflow. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3865 
/*
 * Implement the VIS EDGE* instructions.  WIDTH is the element size in
 * bits (8, 16 or 32); CC additionally sets the integer condition codes
 * as for SUBCC; LITTLE_ENDIAN selects the *L forms.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /* Derive the in-word element indices and the full mask M. */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l & r : l) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3944 
3945 static bool do_rr(DisasContext *dc, arg_r_r *a,
3946                   void (*func)(TCGv, TCGv))
3947 {
3948     TCGv dst = gen_dest_gpr(dc, a->rd);
3949     TCGv src = gen_load_gpr(dc, a->rs);
3950 
3951     func(dst, src);
3952     gen_store_gpr(dc, a->rd, dst);
3953     return advance_pc(dc);
3954 }
3955 
3956 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3957 
3958 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3959                    void (*func)(TCGv, TCGv, TCGv))
3960 {
3961     TCGv dst = gen_dest_gpr(dc, a->rd);
3962     TCGv src1 = gen_load_gpr(dc, a->rs1);
3963     TCGv src2 = gen_load_gpr(dc, a->rs2);
3964 
3965     func(dst, src1, src2);
3966     gen_store_gpr(dc, a->rd, dst);
3967     return advance_pc(dc);
3968 }
3969 
3970 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3971 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3972 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3973 
3974 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3975 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3976 
3977 TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
3978 TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)
3979 
3980 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
3981 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low three bits of the
 * unmasked sum deposited into GSR bits [2:0] (the align field).
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
3994 
/*
 * ALIGNADDRESS_LITTLE: as gen_op_alignaddr, but GSR bits [2:0] receive
 * the negated sum, i.e. (-(s1 + s2)) & 7.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Negate before the deposit; only the low 3 bits are kept. */
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4011 
/*
 * BMASK: dst = s1 + s2, and the low 32 bits of the result are also
 * deposited into GSR bits [63:32] (the mask field).
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4023 
4024 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4025 {
4026     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4027     return true;
4028 }
4029 
4030 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4031 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4032 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4033 
/*
 * Shift by register count.  L selects left shift; otherwise U selects
 * logical (unsigned) right shift, else arithmetic right shift.
 * a->x selects the 64-bit form (64-bit count mask and no extension).
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* Mask the count to 6 bits (64-bit form) or 5 bits (32-bit form). */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        /* 32-bit left shift: zero-extend the result. */
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        /* 32-bit logical right shift: zero-extend the source first. */
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        /* 32-bit arithmetic right shift: sign-extend the source first. */
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4073 
/*
 * Shift by immediate count.  L/U select the direction and signedness
 * as in do_shift_r; a->x selects the 64-bit form.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Plain target-width shift; no extension needed. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on a 64-bit target: combine the shift with the
         * required 32-bit extension using deposit/extract.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4110 
4111 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4112 {
4113     /* For simplicity, we under-decoded the rs2 form. */
4114     if (!imm && rs2_or_imm & ~0x1f) {
4115         return NULL;
4116     }
4117     if (imm || rs2_or_imm == 0) {
4118         return tcg_constant_tl(rs2_or_imm);
4119     } else {
4120         return cpu_regs[rs2_or_imm];
4121     }
4122 }
4123 
/*
 * Common body of the conditional-move insns: rd = cmp ? src2 : rd.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    /* Load the current value of rd so the false case leaves it unchanged. */
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4133 
4134 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4135 {
4136     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4137     DisasCompare cmp;
4138 
4139     if (src2 == NULL) {
4140         return false;
4141     }
4142     gen_compare(&cmp, a->cc, a->cond, dc);
4143     return do_mov_cond(dc, &cmp, a->rd, src2);
4144 }
4145 
4146 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4147 {
4148     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4149     DisasCompare cmp;
4150 
4151     if (src2 == NULL) {
4152         return false;
4153     }
4154     gen_fcompare(&cmp, a->cc, a->cond);
4155     return do_mov_cond(dc, &cmp, a->rd, src2);
4156 }
4157 
4158 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4159 {
4160     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4161     DisasCompare cmp;
4162 
4163     if (src2 == NULL) {
4164         return false;
4165     }
4166     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4167         return false;
4168     }
4169     return do_mov_cond(dc, &cmp, a->rd, src2);
4170 }
4171 
/*
 * Compute rs1 + rs2_or_imm into a fresh temporary and pass it to FUNC.
 * Shared by JMPL, RETT, RETURN, SAVE and RESTORE below.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4196 
/*
 * JMPL: jump to SRC, writing the address of this insn to rd.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    /* Target must be 4-byte aligned. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4217 
/*
 * RETT (sparc32): return from trap to SRC; privileged.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    /* Target must be 4-byte aligned. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4235 
/*
 * RETURN (sparc64): restore the register window and jump to SRC.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    /* SRC was captured in a temporary, so it survives the window change. */
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4250 
/*
 * SAVE: rotate to a new register window, then store the pre-computed
 * sum into rd of the *new* window (hence the helper runs first).
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4259 
/*
 * RESTORE: rotate back to the previous register window, then store the
 * pre-computed sum into rd of the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4268 
/*
 * DONE/RETRY (sparc64): return from a trap handler; privileged.
 * Both helpers rewrite pc/npc, so the translator marks them dynamic.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4287 
4288 /*
4289  * Major opcode 11 -- load and store instructions
4290  */
4291 
/*
 * Compute the effective address rs1 + rs2_or_imm for a load or store.
 * Returns NULL to reject the under-decoded register form.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    /* Apply 32-bit address masking when required. */
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4320 
4321 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4322 {
4323     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4324     DisasASI da;
4325 
4326     if (addr == NULL) {
4327         return false;
4328     }
4329     da = resolve_asi(dc, a->asi, mop);
4330 
4331     reg = gen_dest_gpr(dc, a->rd);
4332     gen_ld_asi(dc, &da, reg, addr);
4333     gen_store_gpr(dc, a->rd, reg);
4334     return advance_pc(dc);
4335 }
4336 
4337 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4338 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4339 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4340 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4341 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4342 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4343 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4344 
4345 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4346 {
4347     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4348     DisasASI da;
4349 
4350     if (addr == NULL) {
4351         return false;
4352     }
4353     da = resolve_asi(dc, a->asi, mop);
4354 
4355     reg = gen_load_gpr(dc, a->rd);
4356     gen_st_asi(dc, &da, reg, addr);
4357     return advance_pc(dc);
4358 }
4359 
4360 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4361 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4362 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4363 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4364 
/*
 * LDD: load doubleword into the even/odd register pair rd/rd+1.
 * The destination register number must be even.
 */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4381 
/*
 * STD: store the even/odd register pair rd/rd+1 as a doubleword.
 * The source register number must be even.
 */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4398 
4399 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4400 {
4401     TCGv addr, reg;
4402     DisasASI da;
4403 
4404     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4405     if (addr == NULL) {
4406         return false;
4407     }
4408     da = resolve_asi(dc, a->asi, MO_UB);
4409 
4410     reg = gen_dest_gpr(dc, a->rd);
4411     gen_ldstub_asi(dc, &da, reg, addr);
4412     gen_store_gpr(dc, a->rd, reg);
4413     return advance_pc(dc);
4414 }
4415 
4416 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4417 {
4418     TCGv addr, dst, src;
4419     DisasASI da;
4420 
4421     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4422     if (addr == NULL) {
4423         return false;
4424     }
4425     da = resolve_asi(dc, a->asi, MO_TEUL);
4426 
4427     dst = gen_dest_gpr(dc, a->rd);
4428     src = gen_load_gpr(dc, a->rd);
4429     gen_swap_asi(dc, &da, dst, src, addr);
4430     gen_store_gpr(dc, a->rd, dst);
4431     return advance_pc(dc);
4432 }
4433 
/*
 * CASA/CASXA: compare-and-swap.  The effective address is rs1 alone;
 * the comparison value comes from rs2 and the new value from rd,
 * with the old memory value written back to rd.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4455 
/*
 * Floating-point load of size SZ (32/64/128 bits) into register rd,
 * through the decoded ASI.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad loads require float128 support on this cpu. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4483 
/*
 * Floating-point store of size SZ (32/64/128 bits) from register rd,
 * through the decoded ASI.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad stores require float128 support on this cpu. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4510 
/*
 * STDFQ (sparc32-only, privileged): always raises a floating-point
 * exception with FTT = sequence_error here.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4525 
/*
 * LDFSR: load the 32-bit FSR from memory.  fcc0 is extracted here;
 * the remaining fields are applied by the helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4547 
/*
 * LDXFSR/LDXEFSR: load the 64-bit FSR.  ENTIRE selects whether FTT is
 * also replaced (LDXEFSR) or preserved (LDXFSR).
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /*
     * HI aliases cpu_fcc[3] to receive the upper word directly; fcc1/2
     * must therefore be extracted before fcc3 overwrites it.
     */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4586 
4587 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4588 {
4589     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4590     TCGv fsr;
4591 
4592     if (addr == NULL) {
4593         return false;
4594     }
4595     if (gen_trap_ifnofpu(dc)) {
4596         return true;
4597     }
4598 
4599     fsr = tcg_temp_new();
4600     gen_helper_get_fsr(fsr, tcg_env);
4601     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4602     return advance_pc(dc);
4603 }
4604 
4605 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4606 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4607 
4608 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4609 {
4610     if (gen_trap_ifnofpu(dc)) {
4611         return true;
4612     }
4613     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4614     return advance_pc(dc);
4615 }
4616 
4617 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4618 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4619 
4620 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4621 {
4622     if (gen_trap_ifnofpu(dc)) {
4623         return true;
4624     }
4625     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4626     return advance_pc(dc);
4627 }
4628 
4629 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4630 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4631 
4632 static bool do_ff(DisasContext *dc, arg_r_r *a,
4633                   void (*func)(TCGv_i32, TCGv_i32))
4634 {
4635     TCGv_i32 tmp;
4636 
4637     if (gen_trap_ifnofpu(dc)) {
4638         return true;
4639     }
4640 
4641     tmp = gen_load_fpr_F(dc, a->rs);
4642     func(tmp, tmp);
4643     gen_store_fpr_F(dc, a->rd, tmp);
4644     return advance_pc(dc);
4645 }
4646 
4647 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4648 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4649 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4650 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4651 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4652 
4653 static bool do_fd(DisasContext *dc, arg_r_r *a,
4654                   void (*func)(TCGv_i32, TCGv_i64))
4655 {
4656     TCGv_i32 dst;
4657     TCGv_i64 src;
4658 
4659     if (gen_trap_ifnofpu(dc)) {
4660         return true;
4661     }
4662 
4663     dst = tcg_temp_new_i32();
4664     src = gen_load_fpr_D(dc, a->rs);
4665     func(dst, src);
4666     gen_store_fpr_F(dc, a->rd, dst);
4667     return advance_pc(dc);
4668 }
4669 
4670 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4671 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4672 
4673 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4674                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4675 {
4676     TCGv_i32 tmp;
4677 
4678     if (gen_trap_ifnofpu(dc)) {
4679         return true;
4680     }
4681 
4682     tmp = gen_load_fpr_F(dc, a->rs);
4683     func(tmp, tcg_env, tmp);
4684     gen_store_fpr_F(dc, a->rd, tmp);
4685     return advance_pc(dc);
4686 }
4687 
4688 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4689 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4690 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4691 
4692 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4693                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4694 {
4695     TCGv_i32 dst;
4696     TCGv_i64 src;
4697 
4698     if (gen_trap_ifnofpu(dc)) {
4699         return true;
4700     }
4701 
4702     dst = tcg_temp_new_i32();
4703     src = gen_load_fpr_D(dc, a->rs);
4704     func(dst, tcg_env, src);
4705     gen_store_fpr_F(dc, a->rd, dst);
4706     return advance_pc(dc);
4707 }
4708 
4709 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4710 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4711 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4712 
4713 static bool do_dd(DisasContext *dc, arg_r_r *a,
4714                   void (*func)(TCGv_i64, TCGv_i64))
4715 {
4716     TCGv_i64 dst, src;
4717 
4718     if (gen_trap_ifnofpu(dc)) {
4719         return true;
4720     }
4721 
4722     dst = tcg_temp_new_i64();
4723     src = gen_load_fpr_D(dc, a->rs);
4724     func(dst, src);
4725     gen_store_fpr_D(dc, a->rd, dst);
4726     return advance_pc(dc);
4727 }
4728 
4729 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4730 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4731 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4732 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4733 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4734 
4735 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4736                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4737 {
4738     TCGv_i64 dst, src;
4739 
4740     if (gen_trap_ifnofpu(dc)) {
4741         return true;
4742     }
4743 
4744     dst = tcg_temp_new_i64();
4745     src = gen_load_fpr_D(dc, a->rs);
4746     func(dst, tcg_env, src);
4747     gen_store_fpr_D(dc, a->rd, dst);
4748     return advance_pc(dc);
4749 }
4750 
4751 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4752 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4753 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4754 
4755 static bool do_df(DisasContext *dc, arg_r_r *a,
4756                   void (*func)(TCGv_i64, TCGv_i32))
4757 {
4758     TCGv_i64 dst;
4759     TCGv_i32 src;
4760 
4761     if (gen_trap_ifnofpu(dc)) {
4762         return true;
4763     }
4764 
4765     dst = tcg_temp_new_i64();
4766     src = gen_load_fpr_F(dc, a->rs);
4767     func(dst, src);
4768     gen_store_fpr_D(dc, a->rd, dst);
4769     return advance_pc(dc);
4770 }
4771 
4772 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4773 
4774 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4775                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4776 {
4777     TCGv_i64 dst;
4778     TCGv_i32 src;
4779 
4780     if (gen_trap_ifnofpu(dc)) {
4781         return true;
4782     }
4783 
4784     dst = tcg_temp_new_i64();
4785     src = gen_load_fpr_F(dc, a->rs);
4786     func(dst, tcg_env, src);
4787     gen_store_fpr_D(dc, a->rd, dst);
4788     return advance_pc(dc);
4789 }
4790 
4791 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4792 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4793 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4794 
/*
 * Quad-precision unary op, not via a helper: frd = func(frs).
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    /* No helper runs for these ops, so clear exceptions/FTT inline. */
    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4817 
4818 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4819                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4820 {
4821     TCGv_i128 t;
4822 
4823     if (gen_trap_ifnofpu(dc)) {
4824         return true;
4825     }
4826     if (gen_trap_float128(dc)) {
4827         return true;
4828     }
4829 
4830     t = gen_load_fpr_Q(dc, a->rs);
4831     func(t, tcg_env, t);
4832     gen_store_fpr_Q(dc, a->rd, t);
4833     return advance_pc(dc);
4834 }
4835 
4836 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4837 
4838 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4839                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4840 {
4841     TCGv_i128 src;
4842     TCGv_i32 dst;
4843 
4844     if (gen_trap_ifnofpu(dc)) {
4845         return true;
4846     }
4847     if (gen_trap_float128(dc)) {
4848         return true;
4849     }
4850 
4851     src = gen_load_fpr_Q(dc, a->rs);
4852     dst = tcg_temp_new_i32();
4853     func(dst, tcg_env, src);
4854     gen_store_fpr_F(dc, a->rd, dst);
4855     return advance_pc(dc);
4856 }
4857 
4858 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4859 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4860 
4861 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4862                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4863 {
4864     TCGv_i128 src;
4865     TCGv_i64 dst;
4866 
4867     if (gen_trap_ifnofpu(dc)) {
4868         return true;
4869     }
4870     if (gen_trap_float128(dc)) {
4871         return true;
4872     }
4873 
4874     src = gen_load_fpr_Q(dc, a->rs);
4875     dst = tcg_temp_new_i64();
4876     func(dst, tcg_env, src);
4877     gen_store_fpr_D(dc, a->rd, dst);
4878     return advance_pc(dc);
4879 }
4880 
4881 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4882 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4883 
4884 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4885                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4886 {
4887     TCGv_i32 src;
4888     TCGv_i128 dst;
4889 
4890     if (gen_trap_ifnofpu(dc)) {
4891         return true;
4892     }
4893     if (gen_trap_float128(dc)) {
4894         return true;
4895     }
4896 
4897     src = gen_load_fpr_F(dc, a->rs);
4898     dst = tcg_temp_new_i128();
4899     func(dst, tcg_env, src);
4900     gen_store_fpr_Q(dc, a->rd, dst);
4901     return advance_pc(dc);
4902 }
4903 
4904 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4905 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4906 
4907 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4908                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4909 {
4910     TCGv_i64 src;
4911     TCGv_i128 dst;
4912 
4913     if (gen_trap_ifnofpu(dc)) {
4914         return true;
4915     }
4916     if (gen_trap_float128(dc)) {
4917         return true;
4918     }
4919 
4920     src = gen_load_fpr_D(dc, a->rs);
4921     dst = tcg_temp_new_i128();
4922     func(dst, tcg_env, src);
4923     gen_store_fpr_Q(dc, a->rd, dst);
4924     return advance_pc(dc);
4925 }
4926 
4927 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4928 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4929 
4930 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4931                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4932 {
4933     TCGv_i32 src1, src2;
4934 
4935     if (gen_trap_ifnofpu(dc)) {
4936         return true;
4937     }
4938 
4939     src1 = gen_load_fpr_F(dc, a->rs1);
4940     src2 = gen_load_fpr_F(dc, a->rs2);
4941     func(src1, src1, src2);
4942     gen_store_fpr_F(dc, a->rd, src1);
4943     return advance_pc(dc);
4944 }
4945 
4946 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4947 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4948 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4949 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4950 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4951 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4952 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4953 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4954 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4955 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4956 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4957 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4958 
4959 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4960 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4961 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4962 
4963 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4964 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4965 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4966 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4967 
4968 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4969                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4970 {
4971     TCGv_i32 src1, src2;
4972 
4973     if (gen_trap_ifnofpu(dc)) {
4974         return true;
4975     }
4976 
4977     src1 = gen_load_fpr_F(dc, a->rs1);
4978     src2 = gen_load_fpr_F(dc, a->rs2);
4979     func(src1, tcg_env, src1, src2);
4980     gen_store_fpr_F(dc, a->rd, src1);
4981     return advance_pc(dc);
4982 }
4983 
4984 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4985 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4986 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4987 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4988 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
4989 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4990 
4991 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4992                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4993 {
4994     TCGv_i64 dst;
4995     TCGv_i32 src1, src2;
4996 
4997     if (gen_trap_ifnofpu(dc)) {
4998         return true;
4999     }
5000 
5001     dst = tcg_temp_new_i64();
5002     src1 = gen_load_fpr_F(dc, a->rs1);
5003     src2 = gen_load_fpr_F(dc, a->rs2);
5004     func(dst, src1, src2);
5005     gen_store_fpr_D(dc, a->rd, dst);
5006     return advance_pc(dc);
5007 }
5008 
5009 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5010 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5011 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5012 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5013 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5014 
5015 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5016                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5017 {
5018     TCGv_i64 dst, src2;
5019     TCGv_i32 src1;
5020 
5021     if (gen_trap_ifnofpu(dc)) {
5022         return true;
5023     }
5024 
5025     dst = tcg_temp_new_i64();
5026     src1 = gen_load_fpr_F(dc, a->rs1);
5027     src2 = gen_load_fpr_D(dc, a->rs2);
5028     func(dst, src1, src2);
5029     gen_store_fpr_D(dc, a->rd, dst);
5030     return advance_pc(dc);
5031 }
5032 
5033 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5034 
5035 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
5036                         void (*func)(unsigned, uint32_t, uint32_t,
5037                                      uint32_t, uint32_t, uint32_t))
5038 {
5039     if (gen_trap_ifnofpu(dc)) {
5040         return true;
5041     }
5042 
5043     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5044          gen_offset_fpr_D(a->rs2), 8, 8);
5045     return advance_pc(dc);
5046 }
5047 
5048 TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
5049 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
5050 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
5051 
5052 TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
5053 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
5054 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5055 
5056 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5057 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5058 
5059 TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
5060 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5061 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5062 TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
5063 TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)
5064 
5065 TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
5066 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5067 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5068 TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
5069 TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)
5070 
5071 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5072 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5073 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5074 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5075 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5076 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5077 
5078 TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
5079 TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
5080 TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
5081 TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
5082 TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
5083 TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)
5084 
5085 TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
5086 TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
5087 TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
5088 TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
5089 TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
5090 TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5091 
5092 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5093                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5094 {
5095     TCGv_i64 dst, src1, src2;
5096 
5097     if (gen_trap_ifnofpu(dc)) {
5098         return true;
5099     }
5100 
5101     dst = tcg_temp_new_i64();
5102     src1 = gen_load_fpr_D(dc, a->rs1);
5103     src2 = gen_load_fpr_D(dc, a->rs2);
5104     func(dst, src1, src2);
5105     gen_store_fpr_D(dc, a->rd, dst);
5106     return advance_pc(dc);
5107 }
5108 
5109 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5110 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5111 
5112 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5113 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5114 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5115 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5116 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5117 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5118 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5119 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5120 
5121 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5122 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
5123 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5124 
5125 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5126 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5127 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5128 
5129 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5130 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5131 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5132 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5133 
5134 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5135                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5136 {
5137     TCGv_i64 src1, src2;
5138     TCGv dst;
5139 
5140     if (gen_trap_ifnofpu(dc)) {
5141         return true;
5142     }
5143 
5144     dst = gen_dest_gpr(dc, a->rd);
5145     src1 = gen_load_fpr_D(dc, a->rs1);
5146     src2 = gen_load_fpr_D(dc, a->rs2);
5147     func(dst, src1, src2);
5148     gen_store_gpr(dc, a->rd, dst);
5149     return advance_pc(dc);
5150 }
5151 
5152 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5153 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5154 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5155 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5156 TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
5157 TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)
5158 
5159 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5160 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5161 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5162 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5163 TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
5164 TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)
5165 
5166 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5167 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5168 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5169 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5170 TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
5171 TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)
5172 
5173 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5174 TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
5175 TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5176 
5177 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5178                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5179 {
5180     TCGv_i64 dst, src1, src2;
5181 
5182     if (gen_trap_ifnofpu(dc)) {
5183         return true;
5184     }
5185 
5186     dst = tcg_temp_new_i64();
5187     src1 = gen_load_fpr_D(dc, a->rs1);
5188     src2 = gen_load_fpr_D(dc, a->rs2);
5189     func(dst, tcg_env, src1, src2);
5190     gen_store_fpr_D(dc, a->rd, dst);
5191     return advance_pc(dc);
5192 }
5193 
5194 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5195 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5196 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5197 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5198 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5199 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5200 
5201 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5202 {
5203     TCGv_i64 dst;
5204     TCGv_i32 src1, src2;
5205 
5206     if (gen_trap_ifnofpu(dc)) {
5207         return true;
5208     }
5209     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5210         return raise_unimpfpop(dc);
5211     }
5212 
5213     dst = tcg_temp_new_i64();
5214     src1 = gen_load_fpr_F(dc, a->rs1);
5215     src2 = gen_load_fpr_F(dc, a->rs2);
5216     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5217     gen_store_fpr_D(dc, a->rd, dst);
5218     return advance_pc(dc);
5219 }
5220 
5221 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5222 {
5223     TCGv_i64 dst;
5224     TCGv_i32 src1, src2;
5225 
5226     if (!avail_VIS3(dc)) {
5227         return false;
5228     }
5229     if (gen_trap_ifnofpu(dc)) {
5230         return true;
5231     }
5232     dst = tcg_temp_new_i64();
5233     src1 = gen_load_fpr_F(dc, a->rs1);
5234     src2 = gen_load_fpr_F(dc, a->rs2);
5235     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5236     gen_store_fpr_D(dc, a->rd, dst);
5237     return advance_pc(dc);
5238 }
5239 
5240 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5241                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5242 {
5243     TCGv_i32 dst, src1, src2, src3;
5244 
5245     if (gen_trap_ifnofpu(dc)) {
5246         return true;
5247     }
5248 
5249     src1 = gen_load_fpr_F(dc, a->rs1);
5250     src2 = gen_load_fpr_F(dc, a->rs2);
5251     src3 = gen_load_fpr_F(dc, a->rs3);
5252     dst = tcg_temp_new_i32();
5253     func(dst, src1, src2, src3);
5254     gen_store_fpr_F(dc, a->rd, dst);
5255     return advance_pc(dc);
5256 }
5257 
5258 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5259 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5260 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5261 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5262 
5263 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5264                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5265 {
5266     TCGv_i64 dst, src1, src2, src3;
5267 
5268     if (gen_trap_ifnofpu(dc)) {
5269         return true;
5270     }
5271 
5272     dst  = tcg_temp_new_i64();
5273     src1 = gen_load_fpr_D(dc, a->rs1);
5274     src2 = gen_load_fpr_D(dc, a->rs2);
5275     src3 = gen_load_fpr_D(dc, a->rs3);
5276     func(dst, src1, src2, src3);
5277     gen_store_fpr_D(dc, a->rd, dst);
5278     return advance_pc(dc);
5279 }
5280 
5281 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5282 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5283 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5284 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5285 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5286 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5287 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5288 
5289 static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
5290 {
5291     TCGv_i64 dst, src1, src2;
5292     TCGv src3;
5293 
5294     if (!avail_VIS4(dc)) {
5295         return false;
5296     }
5297     if (gen_trap_ifnofpu(dc)) {
5298         return true;
5299     }
5300 
5301     dst  = tcg_temp_new_i64();
5302     src1 = gen_load_fpr_D(dc, a->rd);
5303     src2 = gen_load_fpr_D(dc, a->rs2);
5304     src3 = gen_load_gpr(dc, a->rs1);
5305     gen_op_faligndata_i(dst, src1, src2, src3);
5306     gen_store_fpr_D(dc, a->rd, dst);
5307     return advance_pc(dc);
5308 }
5309 
5310 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5311                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5312 {
5313     TCGv_i128 src1, src2;
5314 
5315     if (gen_trap_ifnofpu(dc)) {
5316         return true;
5317     }
5318     if (gen_trap_float128(dc)) {
5319         return true;
5320     }
5321 
5322     src1 = gen_load_fpr_Q(dc, a->rs1);
5323     src2 = gen_load_fpr_Q(dc, a->rs2);
5324     func(src1, tcg_env, src1, src2);
5325     gen_store_fpr_Q(dc, a->rd, src1);
5326     return advance_pc(dc);
5327 }
5328 
5329 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5330 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5331 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5332 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5333 
5334 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5335 {
5336     TCGv_i64 src1, src2;
5337     TCGv_i128 dst;
5338 
5339     if (gen_trap_ifnofpu(dc)) {
5340         return true;
5341     }
5342     if (gen_trap_float128(dc)) {
5343         return true;
5344     }
5345 
5346     src1 = gen_load_fpr_D(dc, a->rs1);
5347     src2 = gen_load_fpr_D(dc, a->rs2);
5348     dst = tcg_temp_new_i128();
5349     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5350     gen_store_fpr_Q(dc, a->rd, dst);
5351     return advance_pc(dc);
5352 }
5353 
5354 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5355                      void (*func)(DisasContext *, DisasCompare *, int, int))
5356 {
5357     DisasCompare cmp;
5358 
5359     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5360         return false;
5361     }
5362     if (gen_trap_ifnofpu(dc)) {
5363         return true;
5364     }
5365     if (is_128 && gen_trap_float128(dc)) {
5366         return true;
5367     }
5368 
5369     gen_op_clear_ieee_excp_and_FTT();
5370     func(dc, &cmp, a->rd, a->rs2);
5371     return advance_pc(dc);
5372 }
5373 
5374 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5375 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5376 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5377 
5378 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5379                       void (*func)(DisasContext *, DisasCompare *, int, int))
5380 {
5381     DisasCompare cmp;
5382 
5383     if (gen_trap_ifnofpu(dc)) {
5384         return true;
5385     }
5386     if (is_128 && gen_trap_float128(dc)) {
5387         return true;
5388     }
5389 
5390     gen_op_clear_ieee_excp_and_FTT();
5391     gen_compare(&cmp, a->cc, a->cond, dc);
5392     func(dc, &cmp, a->rd, a->rs2);
5393     return advance_pc(dc);
5394 }
5395 
5396 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5397 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5398 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5399 
5400 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5401                        void (*func)(DisasContext *, DisasCompare *, int, int))
5402 {
5403     DisasCompare cmp;
5404 
5405     if (gen_trap_ifnofpu(dc)) {
5406         return true;
5407     }
5408     if (is_128 && gen_trap_float128(dc)) {
5409         return true;
5410     }
5411 
5412     gen_op_clear_ieee_excp_and_FTT();
5413     gen_fcompare(&cmp, a->cc, a->cond);
5414     func(dc, &cmp, a->rd, a->rs2);
5415     return advance_pc(dc);
5416 }
5417 
5418 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5419 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5420 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5421 
5422 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5423 {
5424     TCGv_i32 src1, src2;
5425 
5426     if (avail_32(dc) && a->cc != 0) {
5427         return false;
5428     }
5429     if (gen_trap_ifnofpu(dc)) {
5430         return true;
5431     }
5432 
5433     src1 = gen_load_fpr_F(dc, a->rs1);
5434     src2 = gen_load_fpr_F(dc, a->rs2);
5435     if (e) {
5436         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5437     } else {
5438         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5439     }
5440     return advance_pc(dc);
5441 }
5442 
5443 TRANS(FCMPs, ALL, do_fcmps, a, false)
5444 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5445 
5446 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5447 {
5448     TCGv_i64 src1, src2;
5449 
5450     if (avail_32(dc) && a->cc != 0) {
5451         return false;
5452     }
5453     if (gen_trap_ifnofpu(dc)) {
5454         return true;
5455     }
5456 
5457     src1 = gen_load_fpr_D(dc, a->rs1);
5458     src2 = gen_load_fpr_D(dc, a->rs2);
5459     if (e) {
5460         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5461     } else {
5462         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5463     }
5464     return advance_pc(dc);
5465 }
5466 
5467 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5468 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5469 
5470 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5471 {
5472     TCGv_i128 src1, src2;
5473 
5474     if (avail_32(dc) && a->cc != 0) {
5475         return false;
5476     }
5477     if (gen_trap_ifnofpu(dc)) {
5478         return true;
5479     }
5480     if (gen_trap_float128(dc)) {
5481         return true;
5482     }
5483 
5484     src1 = gen_load_fpr_Q(dc, a->rs1);
5485     src2 = gen_load_fpr_Q(dc, a->rs2);
5486     if (e) {
5487         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5488     } else {
5489         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5490     }
5491     return advance_pc(dc);
5492 }
5493 
5494 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5495 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5496 
5497 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5498 {
5499     TCGv_i32 src1, src2;
5500 
5501     if (!avail_VIS3(dc)) {
5502         return false;
5503     }
5504     if (gen_trap_ifnofpu(dc)) {
5505         return true;
5506     }
5507 
5508     src1 = gen_load_fpr_F(dc, a->rs1);
5509     src2 = gen_load_fpr_F(dc, a->rs2);
5510     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5511     return advance_pc(dc);
5512 }
5513 
5514 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5515 {
5516     TCGv_i64 src1, src2;
5517 
5518     if (!avail_VIS3(dc)) {
5519         return false;
5520     }
5521     if (gen_trap_ifnofpu(dc)) {
5522         return true;
5523     }
5524 
5525     src1 = gen_load_fpr_D(dc, a->rs1);
5526     src2 = gen_load_fpr_D(dc, a->rs2);
5527     gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
5528     return advance_pc(dc);
5529 }
5530 
5531 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5532                       int (*offset)(unsigned int),
5533                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5534 {
5535     TCGv dst;
5536 
5537     if (gen_trap_ifnofpu(dc)) {
5538         return true;
5539     }
5540     dst = gen_dest_gpr(dc, a->rd);
5541     load(dst, tcg_env, offset(a->rs));
5542     gen_store_gpr(dc, a->rd, dst);
5543     return advance_pc(dc);
5544 }
5545 
5546 TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
5547 TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
5548 TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5549 
5550 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5551                       int (*offset)(unsigned int),
5552                       void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5553 {
5554     TCGv src;
5555 
5556     if (gen_trap_ifnofpu(dc)) {
5557         return true;
5558     }
5559     src = gen_load_gpr(dc, a->rs);
5560     store(src, tcg_env, offset(a->rd));
5561     return advance_pc(dc);
5562 }
5563 
5564 TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
5565 TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5566 
/*
 * Initialize the SPARC-specific DisasContext fields for a new
 * translation block, decoding state captured in the TB flags.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next-PC is carried in the TB's cs_base field. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* Default ASI for this TB, as recorded in the TB flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5595 
/* Nothing to do at TB start for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5599 
/*
 * Record pc/npc for this instruction so that
 * sparc_restore_state_to_opc can reconstruct them.  An npc with low
 * bits set is one of the sentinel values (JUMP_PC, DYNAMIC_PC,
 * DYNAMIC_PC_LOOKUP) rather than a real address.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /*
             * Unresolved conditional branch: record the taken target
             * tagged with JUMP_PC; the not-taken target is pc + 4.
             */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic flavours are recorded simply as DYNAMIC_PC. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5621 
/* Fetch, decode and translate a single 4-byte instruction. */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    /* Unrecognized encodings raise an illegal-instruction trap. */
    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /*
     * If dc->pc no longer tracks the linear pc_next, the insn changed
     * control flow; end the TB.
     */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5641 
/*
 * Finish the TB: select the exit sequence (direct chaining, TB
 * lookup, or plain exit) according to how translation ended, then
 * emit the out-of-line code for any delayed exceptions accumulated
 * during the block.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* Low bits set in pc/npc mark the dynamic-PC sentinels. */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Unresolved conditional branch: compute npc at runtime. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the deferred exception paths and free the list. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Only store npc when it is a real (4-byte aligned) address. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5723 
/* Hooks connecting the SPARC front end to the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5731 
/*
 * Translate one TB of guest code; all work is driven by the generic
 * translator_loop through sparc_tr_ops.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5739 
/* Allocate the TCG globals that mirror CPUSPARCState fields. */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: FPRS plus four fcc fields on sparc64, one fcc
     * field otherwise. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: pc/npc, condition-code pieces, etc. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is handled specially and gets no backing global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers are accessed indirectly through regwptr. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5805 
/*
 * Restore pc/npc from the values recorded by sparc_tr_insn_start:
 * data[0] is pc, data[1] is npc (possibly a DYNAMIC_PC or JUMP_PC
 * sentinel rather than a real address).
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            /* Strip the JUMP_PC tag to recover the taken target. */
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5828