/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b) sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b) sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))
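
/*
 * Worked example (illustrative values only): for insn = 0x81c3e008, the
 * encoding of "retl", GET_FIELD(insn, 0, 1) extracts the two most
 * significant bits (the op field, here 2), since FROM/TO count down from
 * bit 31; the same field in the manuals' numbering is
 * GET_FIELD_SP(insn, 30, 31).
 */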

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point register moves */

static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
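
/*
 * Layout note, derived from the accessors above: the F, D and Q views all
 * alias env->fpr[], an array of CPU_DoubleU.  An even-numbered F register
 * is the upper half and the following odd F register the lower half of one
 * double; a quad occupies two adjacent even-numbered D registers, e.g.
 * %f0..%f3 (illustrative example).
 */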

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
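
/*
 * Why the V computation above is correct (sketch): signed overflow of an
 * add is (result ^ src1) & (result ^ src2), i.e. the result sign differs
 * from both operands.  Overflow requires src1 and src2 to have equal
 * signs, in which case result ^ src1 == result ^ src2, so the form
 * (result ^ src2) & ~(src1 ^ src2) is equivalent and lets the
 * src1 ^ src2 term be reused for the icc.C carry-in computation.
 */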

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
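    /* sub2 leaves all-ones in the borrow word on borrow; negate to get C = 1. */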
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
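
/*
 * Note: MULScc is the V8 multiply-step instruction; 32 consecutive steps
 * (typically followed by a final adjustment step) implement a full 32x32
 * shift-and-add multiply, consuming one multiplier bit from %y and
 * shifting one partial-product bit back into %y per step.
 */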

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
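
/*
 * src1 is deliberately unused: in the V9 POPC encoding rs1 must be zero,
 * so only src2 contributes to the population count (assumption: non-zero
 * rs1 encodings are rejected earlier, during decode).
 */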

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
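
/*
 * array8/array16/array32 share one address computation; the wider variants
 * only scale the resulting element offset by the element size, i.e. a left
 * shift by 1 or 2.
 */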

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
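
/*
 * How the branch-free saturation above works: t is precomputed as the
 * saturation value opposite in sign to the raw result r, since on signed
 * overflow the true result has the opposite sign of r; setcond yields 1
 * when r >= 0, and 1 + INT32_MAX wraps to INT32_MIN.  v < 0 detects the
 * overflow, and movcond selects either t or the unsaturated r.
 */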

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
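
/*
 * In both gvec expanders above, .fni8 reuses the existing 64-bit helper as
 * the fallback expansion (one call per 64-bit lane), while .fniv provides
 * the true vector expansion, used when the host backend supports all of
 * the opcodes listed in .opt_opc.
 */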
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* Call this function before using the condition register, as it may
   have been set for a jump. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
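
/*
 * Usage sketch (assumed calling pattern): before a memory access that must
 * be naturally aligned, e.g. gen_check_align(dc, addr, 7) for an 8-byte
 * access; on a misaligned address this branches to a delayed-exception
 * stub that raises TT_UNALIGNED with the pc/npc captured at this insn.
 */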

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
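
/*
 * Note on the xcc/icc split above: on a 64-bit target the flags are kept
 * in 64-bit (xcc) form, so every icc test (xcc == false) must first narrow
 * to the 32-bit view -- ext32s/ext32u of N, V or Z, or an extract of
 * bit 32 of icc.C.
 */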

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
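
/*
 * Summary of the float_muladd flag mapping above, all with a single
 * rounding step: fmadd computes s1 * s2 + s3; fmsub negates s3
 * (float_muladd_negate_c), giving s1 * s2 - s3; fnmsub negates both s3 and
 * the result, giving -(s1 * s2 - s3); fnmadd negates only the result,
 * giving -(s1 * s2 + s3).
 */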

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
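
/*
 * Example (hypothetical operands): a plain "ld [%o0], %o1" reaches here as
 * resolve_asi(dc, -1, MO_TEUL) and yields GET_ASI_DIRECT with the current
 * mem_idx, while "lda [%o0] ASI_USERDATA, %o1" executed in supervisor mode
 * on sparc32 yields GET_ASI_DIRECT with MMU_USER_IDX.
 */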
1657 
1658 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1659 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1660                               TCGv_i32 asi, TCGv_i32 mop)
1661 {
1662     g_assert_not_reached();
1663 }
1664 
1665 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1666                               TCGv_i32 asi, TCGv_i32 mop)
1667 {
1668     g_assert_not_reached();
1669 }
1670 #endif
1671 
1672 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1673 {
1674     switch (da->type) {
1675     case GET_ASI_EXCP:
1676         break;
1677     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1678         gen_exception(dc, TT_ILL_INSN);
1679         break;
1680     case GET_ASI_DIRECT:
1681         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1682         break;
1683 
1684     case GET_ASI_CODE:
1685 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1686         {
1687             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1688             TCGv_i64 t64 = tcg_temp_new_i64();
1689 
1690             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1691             tcg_gen_trunc_i64_tl(dst, t64);
1692         }
1693         break;
1694 #else
1695         g_assert_not_reached();
1696 #endif
1697 
1698     default:
1699         {
1700             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1701             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1702 
1703             save_state(dc);
1704 #ifdef TARGET_SPARC64
1705             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1706 #else
1707             {
1708                 TCGv_i64 t64 = tcg_temp_new_i64();
1709                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1710                 tcg_gen_trunc_i64_tl(dst, t64);
1711             }
1712 #endif
1713         }
1714         break;
1715     }
1716 }
1717 
1718 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1719 {
1720     switch (da->type) {
1721     case GET_ASI_EXCP:
1722         break;
1723 
1724     case GET_ASI_DTWINX: /* Reserved for stda.  */
1725         if (TARGET_LONG_BITS == 32) {
1726             gen_exception(dc, TT_ILL_INSN);
1727             break;
1728         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1729             /* Pre OpenSPARC CPUs don't have these */
1730             gen_exception(dc, TT_ILL_INSN);
1731             break;
1732         }
1733         /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1734         /* fall through */
1735 
1736     case GET_ASI_DIRECT:
1737         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1738         break;
1739 
1740     case GET_ASI_BCOPY:
1741         assert(TARGET_LONG_BITS == 32);
1742         /*
1743          * Copy 32 bytes from the address in SRC to ADDR.
1744          *
1745          * From Ross RT625 hyperSPARC manual, section 4.6:
1746          * "Block Copy and Block Fill will work only on cache line boundaries."
1747          *
1748          * It does not specify whether an unaligned address is truncated or trapped.
1749          * Previous qemu behaviour was to truncate to 4-byte alignment, which
1750          * is obviously wrong.  The only place I can see this used is in the
1751          * Linux kernel which begins with page alignment, advancing by 32,
1752          * so is always aligned.  Assume truncation as the simpler option.
1753          *
1754          * Since the loads and stores are paired, allow the copy to happen
1755          * in the host endianness.  The copy need not be atomic.
1756          */
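        /*
         * With MO_ATOM_IFALIGN_PAIR, each 16-byte access may be treated
         * as a pair of 8-byte halves that are individually atomic when
         * aligned, matching the "need not be atomic" note above.
         */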
1757         {
1758             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1759             TCGv saddr = tcg_temp_new();
1760             TCGv daddr = tcg_temp_new();
1761             TCGv_i128 tmp = tcg_temp_new_i128();
1762 
1763             tcg_gen_andi_tl(saddr, src, -32);
1764             tcg_gen_andi_tl(daddr, addr, -32);
1765             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1766             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1767             tcg_gen_addi_tl(saddr, saddr, 16);
1768             tcg_gen_addi_tl(daddr, daddr, 16);
1769             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1770             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1771         }
1772         break;
1773 
1774     default:
1775         {
1776             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1777             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1778 
1779             save_state(dc);
1780 #ifdef TARGET_SPARC64
1781             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1782 #else
1783             {
1784                 TCGv_i64 t64 = tcg_temp_new_i64();
1785                 tcg_gen_extu_tl_i64(t64, src);
1786                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1787             }
1788 #endif
1789 
1790             /* A write to a TLB register may alter page maps.  End the TB. */
1791             dc->npc = DYNAMIC_PC;
1792         }
1793         break;
1794     }
1795 }
1796 
1797 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1798                          TCGv dst, TCGv src, TCGv addr)
1799 {
1800     switch (da->type) {
1801     case GET_ASI_EXCP:
1802         break;
1803     case GET_ASI_DIRECT:
1804         tcg_gen_atomic_xchg_tl(dst, addr, src,
1805                                da->mem_idx, da->memop | MO_ALIGN);
1806         break;
1807     default:
1808         /* ??? Should be DAE_invalid_asi.  */
1809         gen_exception(dc, TT_DATA_ACCESS);
1810         break;
1811     }
1812 }
1813 
1814 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1815                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1816 {
1817     switch (da->type) {
1818     case GET_ASI_EXCP:
1819         return;
1820     case GET_ASI_DIRECT:
1821         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1822                                   da->mem_idx, da->memop | MO_ALIGN);
1823         break;
1824     default:
1825         /* ??? Should be DAE_invalid_asi.  */
1826         gen_exception(dc, TT_DATA_ACCESS);
1827         break;
1828     }
1829 }
1830 
1831 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1832 {
1833     switch (da->type) {
1834     case GET_ASI_EXCP:
1835         break;
1836     case GET_ASI_DIRECT:
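        /* LDSTUB atomically reads the byte and sets it to all ones,
           which maps directly onto an atomic xchg with 0xff.  */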
1837         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1838                                da->mem_idx, MO_UB);
1839         break;
1840     default:
1841         /* ??? In theory, this should raise DAE_invalid_asi.
1842            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1843         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1844             gen_helper_exit_atomic(tcg_env);
1845         } else {
1846             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1847             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1848             TCGv_i64 s64, t64;
1849 
1850             save_state(dc);
1851             t64 = tcg_temp_new_i64();
1852             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1853 
1854             s64 = tcg_constant_i64(0xff);
1855             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1856 
1857             tcg_gen_trunc_i64_tl(dst, t64);
1858 
1859             /* End the TB.  */
1860             dc->npc = DYNAMIC_PC;
1861         }
1862         break;
1863     }
1864 }
1865 
1866 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1867                         TCGv addr, int rd)
1868 {
1869     MemOp memop = da->memop;
1870     MemOp size = memop & MO_SIZE;
1871     TCGv_i32 d32;
1872     TCGv_i64 d64, l64;
1873     TCGv addr_tmp;
1874 
1875     /* TODO: Use 128-bit load/store below. */
1876     if (size == MO_128) {
1877         memop = (memop & ~MO_SIZE) | MO_64;
1878     }
1879 
1880     switch (da->type) {
1881     case GET_ASI_EXCP:
1882         break;
1883 
1884     case GET_ASI_DIRECT:
1885         memop |= MO_ALIGN_4;
1886         switch (size) {
1887         case MO_32:
1888             d32 = tcg_temp_new_i32();
1889             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1890             gen_store_fpr_F(dc, rd, d32);
1891             break;
1892 
1893         case MO_64:
1894             d64 = tcg_temp_new_i64();
1895             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1896             gen_store_fpr_D(dc, rd, d64);
1897             break;
1898 
1899         case MO_128:
1900             d64 = tcg_temp_new_i64();
1901             l64 = tcg_temp_new_i64();
1902             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1903             addr_tmp = tcg_temp_new();
1904             tcg_gen_addi_tl(addr_tmp, addr, 8);
1905             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
1906             gen_store_fpr_D(dc, rd, d64);
1907             gen_store_fpr_D(dc, rd + 2, l64);
1908             break;
1909         default:
1910             g_assert_not_reached();
1911         }
1912         break;
1913 
1914     case GET_ASI_BLOCK:
1915         /* Valid for lddfa on aligned registers only.  */
1916         if (orig_size == MO_64 && (rd & 7) == 0) {
1917             /* The first operation checks required alignment.  */
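            /* A block transfer moves 64 bytes as eight doublewords into
               rd .. rd+14; only the first access needs the 64-byte
               alignment check, since the rest stay within the same
               block.  */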
1918             addr_tmp = tcg_temp_new();
1919             d64 = tcg_temp_new_i64();
1920             for (int i = 0; ; ++i) {
1921                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
1922                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1923                 gen_store_fpr_D(dc, rd + 2 * i, d64);
1924                 if (i == 7) {
1925                     break;
1926                 }
1927                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1928                 addr = addr_tmp;
1929             }
1930         } else {
1931             gen_exception(dc, TT_ILL_INSN);
1932         }
1933         break;
1934 
1935     case GET_ASI_SHORT:
1936         /* Valid for lddfa only.  */
1937         if (orig_size == MO_64) {
1938             d64 = tcg_temp_new_i64();
1939             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
1940             gen_store_fpr_D(dc, rd, d64);
1941         } else {
1942             gen_exception(dc, TT_ILL_INSN);
1943         }
1944         break;
1945 
1946     default:
1947         {
1948             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1949             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1950 
1951             save_state(dc);
1952             /* According to the table in the UA2011 manual, the only
1953                other asis that are valid for ldfa/lddfa/ldqfa are
1954                the NO_FAULT asis.  We still need a helper for these,
1955                but we can just use the integer asi helper for them.  */
1956             switch (size) {
1957             case MO_32:
1958                 d64 = tcg_temp_new_i64();
1959                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1960                 d32 = tcg_temp_new_i32();
1961                 tcg_gen_extrl_i64_i32(d32, d64);
1962                 gen_store_fpr_F(dc, rd, d32);
1963                 break;
1964             case MO_64:
1965                 d64 = tcg_temp_new_i64();
1966                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1967                 gen_store_fpr_D(dc, rd, d64);
1968                 break;
1969             case MO_128:
1970                 d64 = tcg_temp_new_i64();
1971                 l64 = tcg_temp_new_i64();
1972                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1973                 addr_tmp = tcg_temp_new();
1974                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1975                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
1976                 gen_store_fpr_D(dc, rd, d64);
1977                 gen_store_fpr_D(dc, rd + 2, l64);
1978                 break;
1979             default:
1980                 g_assert_not_reached();
1981             }
1982         }
1983         break;
1984     }
1985 }
1986 
1987 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1988                         TCGv addr, int rd)
1989 {
1990     MemOp memop = da->memop;
1991     MemOp size = memop & MO_SIZE;
1992     TCGv_i32 d32;
1993     TCGv_i64 d64;
1994     TCGv addr_tmp;
1995 
1996     /* TODO: Use 128-bit load/store below. */
1997     if (size == MO_128) {
1998         memop = (memop & ~MO_SIZE) | MO_64;
1999     }
2000 
2001     switch (da->type) {
2002     case GET_ASI_EXCP:
2003         break;
2004 
2005     case GET_ASI_DIRECT:
2006         memop |= MO_ALIGN_4;
2007         switch (size) {
2008         case MO_32:
2009             d32 = gen_load_fpr_F(dc, rd);
2010             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2011             break;
2012         case MO_64:
2013             d64 = gen_load_fpr_D(dc, rd);
2014             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2015             break;
2016         case MO_128:
2017             /* Only 4-byte alignment required.  However, it is legal for the
2018                cpu to signal the alignment fault, and the OS trap handler is
2019                required to fix it up.  Requiring 16-byte alignment here avoids
2020                having to probe the second page before performing the first
2021                write.  */
2022             d64 = gen_load_fpr_D(dc, rd);
2023             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2024             addr_tmp = tcg_temp_new();
2025             tcg_gen_addi_tl(addr_tmp, addr, 8);
2026             d64 = gen_load_fpr_D(dc, rd + 2);
2027             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2028             break;
2029         default:
2030             g_assert_not_reached();
2031         }
2032         break;
2033 
2034     case GET_ASI_BLOCK:
2035         /* Valid for stdfa on aligned registers only.  */
2036         if (orig_size == MO_64 && (rd & 7) == 0) {
2037             /* The first operation checks required alignment.  */
2038             addr_tmp = tcg_temp_new();
2039             for (int i = 0; ; ++i) {
2040                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2041                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2042                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2043                 if (i == 7) {
2044                     break;
2045                 }
2046                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2047                 addr = addr_tmp;
2048             }
2049         } else {
2050             gen_exception(dc, TT_ILL_INSN);
2051         }
2052         break;
2053 
2054     case GET_ASI_SHORT:
2055         /* Valid for stdfa only.  */
2056         if (orig_size == MO_64) {
2057             d64 = gen_load_fpr_D(dc, rd);
2058             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2059         } else {
2060             gen_exception(dc, TT_ILL_INSN);
2061         }
2062         break;
2063 
2064     default:
2065         /* According to the table in the UA2011 manual, the only
2066            other asis that are valid for stfa/stdfa/stqfa are
2067            the PST* asis, which aren't currently handled.  */
2068         gen_exception(dc, TT_ILL_INSN);
2069         break;
2070     }
2071 }
2072 
2073 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2074 {
2075     TCGv hi = gen_dest_gpr(dc, rd);
2076     TCGv lo = gen_dest_gpr(dc, rd + 1);
2077 
2078     switch (da->type) {
2079     case GET_ASI_EXCP:
2080         return;
2081 
2082     case GET_ASI_DTWINX:
2083 #ifdef TARGET_SPARC64
2084         {
2085             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2086             TCGv_i128 t = tcg_temp_new_i128();
2087 
2088             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2089             /*
2090              * Note that LE twinx acts as if each 64-bit register result is
2091              * byte swapped.  We perform one 128-bit LE load, so must swap
2092              * the order of the writebacks.
2093              */
2094             if ((mop & MO_BSWAP) == MO_TE) {
2095                 tcg_gen_extr_i128_i64(lo, hi, t);
2096             } else {
2097                 tcg_gen_extr_i128_i64(hi, lo, t);
2098             }
2099         }
2100         break;
2101 #else
2102         g_assert_not_reached();
2103 #endif
2104 
2105     case GET_ASI_DIRECT:
2106         {
2107             TCGv_i64 tmp = tcg_temp_new_i64();
2108 
2109             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2110 
2111             /* Note that LE ldda acts as if each 32-bit register
2112                result is byte swapped.  Having just performed one
2113                64-bit bswap, we now need to swap the writebacks.  */
2114             if ((da->memop & MO_BSWAP) == MO_TE) {
2115                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2116             } else {
2117                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2118             }
2119         }
2120         break;
2121 
2122     case GET_ASI_CODE:
2123 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2124         {
2125             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2126             TCGv_i64 tmp = tcg_temp_new_i64();
2127 
2128             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2129 
2130             /* See above.  */
2131             if ((da->memop & MO_BSWAP) == MO_TE) {
2132                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2133             } else {
2134                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2135             }
2136         }
2137         break;
2138 #else
2139         g_assert_not_reached();
2140 #endif
2141 
2142     default:
2143         /* ??? In theory we've handled all of the ASIs that are valid
2144            for ldda, and this should raise DAE_invalid_asi.  However,
2145            real hardware allows others.  This can be seen with e.g.
2146            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2147         {
2148             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2149             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2150             TCGv_i64 tmp = tcg_temp_new_i64();
2151 
2152             save_state(dc);
2153             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2154 
2155             /* See above.  */
2156             if ((da->memop & MO_BSWAP) == MO_TE) {
2157                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2158             } else {
2159                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2160             }
2161         }
2162         break;
2163     }
2164 
2165     gen_store_gpr(dc, rd, hi);
2166     gen_store_gpr(dc, rd + 1, lo);
2167 }
2168 
2169 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2170 {
2171     TCGv hi = gen_load_gpr(dc, rd);
2172     TCGv lo = gen_load_gpr(dc, rd + 1);
2173 
2174     switch (da->type) {
2175     case GET_ASI_EXCP:
2176         break;
2177 
2178     case GET_ASI_DTWINX:
2179 #ifdef TARGET_SPARC64
2180         {
2181             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2182             TCGv_i128 t = tcg_temp_new_i128();
2183 
2184             /*
2185              * Note that LE twinx acts as if each 64-bit register result is
2186              * byte swapped.  We perform one 128-bit LE store, so must swap
2187              * the order of the construction.
2188              */
2189             if ((mop & MO_BSWAP) == MO_TE) {
2190                 tcg_gen_concat_i64_i128(t, lo, hi);
2191             } else {
2192                 tcg_gen_concat_i64_i128(t, hi, lo);
2193             }
2194             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2195         }
2196         break;
2197 #else
2198         g_assert_not_reached();
2199 #endif
2200 
2201     case GET_ASI_DIRECT:
2202         {
2203             TCGv_i64 t64 = tcg_temp_new_i64();
2204 
2205             /* Note that LE stda acts as if each 32-bit source register is
2206                byte swapped.  We will perform one 64-bit LE store, so now
2207                we must swap the order of the construction.  */
2208             if ((da->memop & MO_BSWAP) == MO_TE) {
2209                 tcg_gen_concat_tl_i64(t64, lo, hi);
2210             } else {
2211                 tcg_gen_concat_tl_i64(t64, hi, lo);
2212             }
2213             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2214         }
2215         break;
2216 
2217     case GET_ASI_BFILL:
2218         assert(TARGET_LONG_BITS == 32);
2219         /*
2220          * Store 32 bytes of [rd:rd+1] to ADDR.
2221          * See comments for GET_ASI_BCOPY above.
2222          */
2223         {
2224             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2225             TCGv_i64 t8 = tcg_temp_new_i64();
2226             TCGv_i128 t16 = tcg_temp_new_i128();
2227             TCGv daddr = tcg_temp_new();
2228 
2229             tcg_gen_concat_tl_i64(t8, lo, hi);
2230             tcg_gen_concat_i64_i128(t16, t8, t8);
2231             tcg_gen_andi_tl(daddr, addr, -32);
2232             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2233             tcg_gen_addi_tl(daddr, daddr, 16);
2234             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2235         }
2236         break;
2237 
2238     default:
2239         /* ??? In theory we've handled all of the ASIs that are valid
2240            for stda, and this should raise DAE_invalid_asi.  */
2241         {
2242             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2243             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2244             TCGv_i64 t64 = tcg_temp_new_i64();
2245 
2246             /* See above.  */
2247             if ((da->memop & MO_BSWAP) == MO_TE) {
2248                 tcg_gen_concat_tl_i64(t64, lo, hi);
2249             } else {
2250                 tcg_gen_concat_tl_i64(t64, hi, lo);
2251             }
2252 
2253             save_state(dc);
2254             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2255         }
2256         break;
2257     }
2258 }
2259 
2260 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2261 {
2262 #ifdef TARGET_SPARC64
2263     TCGv_i32 c32, zero, dst, s1, s2;
2264     TCGv_i64 c64 = tcg_temp_new_i64();
2265 
2266     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2267        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2268        the latter.  */
2269     c32 = tcg_temp_new_i32();
2270     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2271     tcg_gen_extrl_i64_i32(c32, c64);
2272 
2273     s1 = gen_load_fpr_F(dc, rs);
2274     s2 = gen_load_fpr_F(dc, rd);
2275     dst = tcg_temp_new_i32();
2276     zero = tcg_constant_i32(0);
2277 
2278     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2279 
2280     gen_store_fpr_F(dc, rd, dst);
2281 #else
2282     qemu_build_not_reached();
2283 #endif
2284 }
2285 
2286 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2287 {
2288 #ifdef TARGET_SPARC64
2289     TCGv_i64 dst = tcg_temp_new_i64();
2290     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2291                         gen_load_fpr_D(dc, rs),
2292                         gen_load_fpr_D(dc, rd));
2293     gen_store_fpr_D(dc, rd, dst);
2294 #else
2295     qemu_build_not_reached();
2296 #endif
2297 }
2298 
2299 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2300 {
2301 #ifdef TARGET_SPARC64
2302     TCGv c2 = tcg_constant_tl(cmp->c2);
2303     TCGv_i64 h = tcg_temp_new_i64();
2304     TCGv_i64 l = tcg_temp_new_i64();
2305 
2306     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2307                         gen_load_fpr_D(dc, rs),
2308                         gen_load_fpr_D(dc, rd));
2309     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2310                         gen_load_fpr_D(dc, rs + 2),
2311                         gen_load_fpr_D(dc, rd + 2));
2312     gen_store_fpr_D(dc, rd, h);
2313     gen_store_fpr_D(dc, rd + 2, l);
2314 #else
2315     qemu_build_not_reached();
2316 #endif
2317 }
2318 
2319 #ifdef TARGET_SPARC64
2320 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2321 {
2322     TCGv_i32 r_tl = tcg_temp_new_i32();
2323 
2324     /* load env->tl into r_tl */
2325     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2326 
2327     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2328     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2329 
2330     /* calculate offset to current trap state from env->ts, reuse r_tl */
2331     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2332     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2333 
2334     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2335     {
2336         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2337         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2338         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2339     }
2340 }
2341 #endif
2342 
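/*
 * On sparc64 the double/quad register encodings fold the insn's bit 0
 * back in as bit 5 of the register index, e.g. encoding 0x03 names the
 * double register %f34 (0x02 | 0x20).  On sparc32 only %f0-%f31 exist,
 * so bit 0 is simply masked off.
 */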
2343 static int extract_dfpreg(DisasContext *dc, int x)
2344 {
2345     int r = x & 0x1e;
2346 #ifdef TARGET_SPARC64
2347     r |= (x & 1) << 5;
2348 #endif
2349     return r;
2350 }
2351 
2352 static int extract_qfpreg(DisasContext *dc, int x)
2353 {
2354     int r = x & 0x1c;
2355 #ifdef TARGET_SPARC64
2356     r |= (x & 1) << 5;
2357 #endif
2358     return r;
2359 }
2360 
2361 /* Include the auto-generated decoder.  */
2362 #include "decode-insns.c.inc"
2363 
2364 #define TRANS(NAME, AVAIL, FUNC, ...) \
2365     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2366     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
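/*
 * Each TRANS() expansion provides one decodetree trans_* callback:
 * the insn is accepted only when its avail_* predicate holds for the
 * translating cpu; otherwise decoding falls through to the illegal
 * instruction path.
 */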
2367 
2368 #define avail_ALL(C)      true
2369 #ifdef TARGET_SPARC64
2370 # define avail_32(C)      false
2371 # define avail_ASR17(C)   false
2372 # define avail_CASA(C)    true
2373 # define avail_DIV(C)     true
2374 # define avail_MUL(C)     true
2375 # define avail_POWERDOWN(C) false
2376 # define avail_64(C)      true
2377 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2378 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2379 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2380 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2381 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2382 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2383 # define avail_VIS3B(C)   avail_VIS3(C)
2384 #else
2385 # define avail_32(C)      true
2386 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2387 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2388 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2389 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2390 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2391 # define avail_64(C)      false
2392 # define avail_FMAF(C)    false
2393 # define avail_GL(C)      false
2394 # define avail_HYPV(C)    false
2395 # define avail_VIS1(C)    false
2396 # define avail_VIS2(C)    false
2397 # define avail_VIS3(C)    false
2398 # define avail_VIS3B(C)   false
2399 #endif
2400 
2401 /* Default case for non jump instructions. */
2402 static bool advance_pc(DisasContext *dc)
2403 {
2404     TCGLabel *l1;
2405 
2406     finishing_insn(dc);
2407 
2408     if (dc->npc & 3) {
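        /*
         * Real (n)pc values are always 4-byte aligned, so a non-aligned
         * npc encodes one of the symbolic markers DYNAMIC_PC,
         * DYNAMIC_PC_LOOKUP or JUMP_PC handled below.
         */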
2409         switch (dc->npc) {
2410         case DYNAMIC_PC:
2411         case DYNAMIC_PC_LOOKUP:
2412             dc->pc = dc->npc;
2413             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2414             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2415             break;
2416 
2417         case JUMP_PC:
2418             /* we can do a static jump */
2419             l1 = gen_new_label();
2420             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2421 
2422             /* jump not taken */
2423             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2424 
2425             /* jump taken */
2426             gen_set_label(l1);
2427             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2428 
2429             dc->base.is_jmp = DISAS_NORETURN;
2430             break;
2431 
2432         default:
2433             g_assert_not_reached();
2434         }
2435     } else {
2436         dc->pc = dc->npc;
2437         dc->npc = dc->npc + 4;
2438     }
2439     return true;
2440 }
2441 
2442 /*
2443  * Major opcodes 00 and 01 -- branches, call, and sethi
2444  */
2445 
2446 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2447                               bool annul, int disp)
2448 {
2449     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2450     target_ulong npc;
2451 
2452     finishing_insn(dc);
2453 
2454     if (cmp->cond == TCG_COND_ALWAYS) {
2455         if (annul) {
2456             dc->pc = dest;
2457             dc->npc = dest + 4;
2458         } else {
2459             gen_mov_pc_npc(dc);
2460             dc->npc = dest;
2461         }
2462         return true;
2463     }
2464 
2465     if (cmp->cond == TCG_COND_NEVER) {
2466         npc = dc->npc;
2467         if (npc & 3) {
2468             gen_mov_pc_npc(dc);
2469             if (annul) {
2470                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2471             }
2472             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2473         } else {
2474             dc->pc = npc + (annul ? 4 : 0);
2475             dc->npc = dc->pc + 4;
2476         }
2477         return true;
2478     }
2479 
2480     flush_cond(dc);
2481     npc = dc->npc;
2482 
2483     if (annul) {
2484         TCGLabel *l1 = gen_new_label();
2485 
2486         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2487         gen_goto_tb(dc, 0, npc, dest);
2488         gen_set_label(l1);
2489         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2490 
2491         dc->base.is_jmp = DISAS_NORETURN;
2492     } else {
2493         if (npc & 3) {
2494             switch (npc) {
2495             case DYNAMIC_PC:
2496             case DYNAMIC_PC_LOOKUP:
2497                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2498                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2499                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2500                                    cmp->c1, tcg_constant_tl(cmp->c2),
2501                                    tcg_constant_tl(dest), cpu_npc);
2502                 dc->pc = npc;
2503                 break;
2504             default:
2505                 g_assert_not_reached();
2506             }
2507         } else {
2508             dc->pc = npc;
2509             dc->npc = JUMP_PC;
2510             dc->jump = *cmp;
2511             dc->jump_pc[0] = dest;
2512             dc->jump_pc[1] = npc + 4;
2513 
2514             /* The condition for cpu_cond is always NE -- normalize. */
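            /* For TCG_COND_NE, c1 ^ c2 is nonzero exactly when the
               branch is taken, which avoids a setcond.  */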
2515             if (cmp->cond == TCG_COND_NE) {
2516                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2517             } else {
2518                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2519             }
2520             dc->cpu_cond_live = true;
2521         }
2522     }
2523     return true;
2524 }
2525 
2526 static bool raise_priv(DisasContext *dc)
2527 {
2528     gen_exception(dc, TT_PRIV_INSN);
2529     return true;
2530 }
2531 
2532 static bool raise_unimpfpop(DisasContext *dc)
2533 {
2534     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2535     return true;
2536 }
2537 
2538 static bool gen_trap_float128(DisasContext *dc)
2539 {
2540     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2541         return false;
2542     }
2543     return raise_unimpfpop(dc);
2544 }
2545 
2546 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2547 {
2548     DisasCompare cmp;
2549 
2550     gen_compare(&cmp, a->cc, a->cond, dc);
2551     return advance_jump_cond(dc, &cmp, a->a, a->i);
2552 }
2553 
2554 TRANS(Bicc, ALL, do_bpcc, a)
2555 TRANS(BPcc,  64, do_bpcc, a)
2556 
2557 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2558 {
2559     DisasCompare cmp;
2560 
2561     if (gen_trap_ifnofpu(dc)) {
2562         return true;
2563     }
2564     gen_fcompare(&cmp, a->cc, a->cond);
2565     return advance_jump_cond(dc, &cmp, a->a, a->i);
2566 }
2567 
2568 TRANS(FBPfcc,  64, do_fbpfcc, a)
2569 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2570 
2571 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2572 {
2573     DisasCompare cmp;
2574 
2575     if (!avail_64(dc)) {
2576         return false;
2577     }
2578     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2579         return false;
2580     }
2581     return advance_jump_cond(dc, &cmp, a->a, a->i);
2582 }
2583 
2584 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2585 {
2586     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2587 
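    /* CALL architecturally saves its own address into %o7 (r15). */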
2588     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2589     gen_mov_pc_npc(dc);
2590     dc->npc = target;
2591     return true;
2592 }
2593 
2594 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2595 {
2596     /*
2597      * For sparc32, always generate the no-coprocessor exception.
2598      * For sparc64, always generate illegal instruction.
2599      */
2600 #ifdef TARGET_SPARC64
2601     return false;
2602 #else
2603     gen_exception(dc, TT_NCP_INSN);
2604     return true;
2605 #endif
2606 }
2607 
2608 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2609 {
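    /* SETHI writes imm22 << 10 to rd, zeroing the low ten bits.  */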
2610     /* Special-case %g0 because that's the canonical nop.  */
2611     if (a->rd) {
2612         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2613     }
2614     return advance_pc(dc);
2615 }
2616 
2617 /*
2618  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2619  */
2620 
2621 static bool do_tcc(DisasContext *dc, int cond, int cc,
2622                    int rs1, bool imm, int rs2_or_imm)
2623 {
2624     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2625                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2626     DisasCompare cmp;
2627     TCGLabel *lab;
2628     TCGv_i32 trap;
2629 
2630     /* Trap never.  */
2631     if (cond == 0) {
2632         return advance_pc(dc);
2633     }
2634 
2635     /*
2636      * Immediate traps are the most common case.  Since this value is
2637      * live across the branch, it really pays to evaluate the constant.
2638      */
2639     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2640         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2641     } else {
2642         trap = tcg_temp_new_i32();
2643         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2644         if (imm) {
2645             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2646         } else {
2647             TCGv_i32 t2 = tcg_temp_new_i32();
2648             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2649             tcg_gen_add_i32(trap, trap, t2);
2650         }
2651         tcg_gen_andi_i32(trap, trap, mask);
2652         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2653     }
2654 
2655     finishing_insn(dc);
2656 
2657     /* Trap always.  */
2658     if (cond == 8) {
2659         save_state(dc);
2660         gen_helper_raise_exception(tcg_env, trap);
2661         dc->base.is_jmp = DISAS_NORETURN;
2662         return true;
2663     }
2664 
2665     /* Conditional trap.  */
2666     flush_cond(dc);
2667     lab = delay_exceptionv(dc, trap);
2668     gen_compare(&cmp, cc, cond, dc);
2669     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2670 
2671     return advance_pc(dc);
2672 }
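
/*
 * Illustrative sketch only, not used by the translator: the trap
 * vector computed into "trap" above, expressed as plain C.  "mask"
 * is V8_TRAP_MASK or UA2005_HTRAP_MASK as chosen in do_tcc, and for
 * the immediate forms rs2_val is the immediate itself.
 */
static inline uint32_t tcc_trap_number_example(uint32_t rs1_val,
                                               uint32_t rs2_val,
                                               uint32_t mask)
{
    return ((rs1_val + rs2_val) & mask) + TT_TRAP;
}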
2673 
2674 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2675 {
2676     if (avail_32(dc) && a->cc) {
2677         return false;
2678     }
2679     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2680 }
2681 
2682 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2683 {
2684     if (avail_64(dc)) {
2685         return false;
2686     }
2687     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2688 }
2689 
2690 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2691 {
2692     if (avail_32(dc)) {
2693         return false;
2694     }
2695     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2696 }
2697 
2698 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2699 {
2700     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2701     return advance_pc(dc);
2702 }
2703 
2704 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2705 {
2706     if (avail_32(dc)) {
2707         return false;
2708     }
2709     if (a->mmask) {
2710         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2711         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2712     }
2713     if (a->cmask) {
2714         /* For #Sync, etc, end the TB to recognize interrupts. */
2715         dc->base.is_jmp = DISAS_EXIT;
2716     }
2717     return advance_pc(dc);
2718 }
2719 
2720 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2721                           TCGv (*func)(DisasContext *, TCGv))
2722 {
2723     if (!priv) {
2724         return raise_priv(dc);
2725     }
2726     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2727     return advance_pc(dc);
2728 }
2729 
2730 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2731 {
2732     return cpu_y;
2733 }
2734 
2735 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2736 {
2737     /*
2738      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2739      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2740      * This matches after all other ASRs, so Leon3 Asr17 is handled first.
2741      */
2742     if (avail_64(dc) && a->rs1 != 0) {
2743         return false;
2744     }
2745     return do_rd_special(dc, true, a->rd, do_rdy);
2746 }
2747 
2748 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2749 {
2750     gen_helper_rdasr17(dst, tcg_env);
2751     return dst;
2752 }
2753 
2754 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2755 
2756 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2757 {
2758     gen_helper_rdccr(dst, tcg_env);
2759     return dst;
2760 }
2761 
2762 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2763 
2764 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2765 {
2766 #ifdef TARGET_SPARC64
2767     return tcg_constant_tl(dc->asi);
2768 #else
2769     qemu_build_not_reached();
2770 #endif
2771 }
2772 
2773 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2774 
2775 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2776 {
2777     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2778 
2779     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
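    /* Counter reads count as I/O for icount; if an I/O window is
       opened here, the TB must end after this insn.  */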
2780     if (translator_io_start(&dc->base)) {
2781         dc->base.is_jmp = DISAS_EXIT;
2782     }
2783     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2784                               tcg_constant_i32(dc->mem_idx));
2785     return dst;
2786 }
2787 
2788 /* TODO: non-priv access only allowed when enabled. */
2789 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2790 
2791 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2792 {
2793     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2794 }
2795 
2796 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2797 
2798 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2799 {
2800     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2801     return dst;
2802 }
2803 
2804 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2805 
2806 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2807 {
2808     gen_trap_ifnofpu(dc);
2809     return cpu_gsr;
2810 }
2811 
2812 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2813 
2814 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2815 {
2816     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2817     return dst;
2818 }
2819 
2820 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2821 
2822 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2823 {
2824     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2825     return dst;
2826 }
2827 
2828 /* TODO: non-priv access only allowed when enabled. */
2829 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2830 
2831 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2832 {
2833     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2834 
2835     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2836     if (translator_io_start(&dc->base)) {
2837         dc->base.is_jmp = DISAS_EXIT;
2838     }
2839     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2840                               tcg_constant_i32(dc->mem_idx));
2841     return dst;
2842 }
2843 
2844 /* TODO: non-priv access only allowed when enabled. */
2845 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2846 
2847 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2848 {
2849     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2850     return dst;
2851 }
2852 
2853 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2854 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2855 
2856 /*
2857  * UltraSPARC-T1 Strand status.
2858  * The HYPV check may not be enough: UA2005 & UA2007 describe
2859  * this ASR as implementation dependent.
2860  */
2861 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2862 {
2863     return tcg_constant_tl(1);
2864 }
2865 
2866 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2867 
2868 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2869 {
2870     gen_helper_rdpsr(dst, tcg_env);
2871     return dst;
2872 }
2873 
2874 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2875 
2876 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2877 {
2878     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2879     return dst;
2880 }
2881 
2882 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2883 
2884 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2885 {
2886     TCGv_i32 tl = tcg_temp_new_i32();
2887     TCGv_ptr tp = tcg_temp_new_ptr();
2888 
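    /* Index env->htstate[tl & MAXTL_MASK]; the shift by 3 scales the
       trap level by the size of each 8-byte entry.  */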
2889     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2890     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2891     tcg_gen_shli_i32(tl, tl, 3);
2892     tcg_gen_ext_i32_ptr(tp, tl);
2893     tcg_gen_add_ptr(tp, tp, tcg_env);
2894 
2895     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2896     return dst;
2897 }
2898 
2899 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2900 
2901 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2902 {
2903     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2904     return dst;
2905 }
2906 
2907 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2908 
2909 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2910 {
2911     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2912     return dst;
2913 }
2914 
2915 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2916 
2917 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2918 {
2919     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2920     return dst;
2921 }
2922 
2923 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2924 
2925 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2926 {
2927     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2928     return dst;
2929 }
2930 
2931 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2932       do_rdhstick_cmpr)
2933 
2934 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2935 {
2936     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2937     return dst;
2938 }
2939 
2940 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2941 
2942 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2943 {
2944 #ifdef TARGET_SPARC64
2945     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2946 
2947     gen_load_trap_state_at_tl(r_tsptr);
2948     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2949     return dst;
2950 #else
2951     qemu_build_not_reached();
2952 #endif
2953 }
2954 
2955 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2956 
2957 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2958 {
2959 #ifdef TARGET_SPARC64
2960     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2961 
2962     gen_load_trap_state_at_tl(r_tsptr);
2963     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2964     return dst;
2965 #else
2966     qemu_build_not_reached();
2967 #endif
2968 }
2969 
2970 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2971 
2972 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2973 {
2974 #ifdef TARGET_SPARC64
2975     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2976 
2977     gen_load_trap_state_at_tl(r_tsptr);
2978     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2979     return dst;
2980 #else
2981     qemu_build_not_reached();
2982 #endif
2983 }
2984 
2985 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2986 
2987 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2988 {
2989 #ifdef TARGET_SPARC64
2990     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2991 
2992     gen_load_trap_state_at_tl(r_tsptr);
2993     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2994     return dst;
2995 #else
2996     qemu_build_not_reached();
2997 #endif
2998 }
2999 
3000 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3001 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3002 
3003 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3004 {
3005     return cpu_tbr;
3006 }
3007 
3008 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3009 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3010 
3011 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3012 {
3013     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3014     return dst;
3015 }
3016 
3017 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3018 
3019 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3020 {
3021     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3022     return dst;
3023 }
3024 
3025 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3026 
3027 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3028 {
3029     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3030     return dst;
3031 }
3032 
3033 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3034 
3035 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3036 {
3037     gen_helper_rdcwp(dst, tcg_env);
3038     return dst;
3039 }
3040 
3041 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3042 
3043 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3044 {
3045     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3046     return dst;
3047 }
3048 
3049 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3050 
3051 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3052 {
3053     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3054     return dst;
3055 }
3056 
3057 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3058       do_rdcanrestore)
3059 
3060 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3061 {
3062     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3063     return dst;
3064 }
3065 
3066 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3067 
3068 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3069 {
3070     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3071     return dst;
3072 }
3073 
3074 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3075 
3076 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3077 {
3078     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3079     return dst;
3080 }
3081 
3082 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3083 
3084 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3085 {
3086     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3087     return dst;
3088 }
3089 
3090 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3091 
3092 /* UA2005 strand status */
3093 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3094 {
3095     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3096     return dst;
3097 }
3098 
3099 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3100 
3101 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3102 {
3103     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3104     return dst;
3105 }
3106 
3107 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3108 
3109 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3110 {
3111     if (avail_64(dc)) {
3112         gen_helper_flushw(tcg_env);
3113         return advance_pc(dc);
3114     }
3115     return false;
3116 }
3117 
3118 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3119                           void (*func)(DisasContext *, TCGv))
3120 {
3121     TCGv src;
3122 
3123     /* For simplicity, we under-decoded the rs2 form. */
3124     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3125         return false;
3126     }
3127     if (!priv) {
3128         return raise_priv(dc);
3129     }
3130 
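    /*
     * Architecturally the WR* insns write "rs1 XOR operand2" to the
     * state register; with rs1 == %g0 this reduces to the operand
     * itself, hence the constant fast path below.
     */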
3131     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3132         src = tcg_constant_tl(a->rs2_or_imm);
3133     } else {
3134         TCGv src1 = gen_load_gpr(dc, a->rs1);
3135         if (a->rs2_or_imm == 0) {
3136             src = src1;
3137         } else {
3138             src = tcg_temp_new();
3139             if (a->imm) {
3140                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3141             } else {
3142                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3143             }
3144         }
3145     }
3146     func(dc, src);
3147     return advance_pc(dc);
3148 }
3149 
3150 static void do_wry(DisasContext *dc, TCGv src)
3151 {
3152     tcg_gen_ext32u_tl(cpu_y, src);
3153 }
3154 
3155 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3156 
3157 static void do_wrccr(DisasContext *dc, TCGv src)
3158 {
3159     gen_helper_wrccr(tcg_env, src);
3160 }
3161 
3162 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3163 
3164 static void do_wrasi(DisasContext *dc, TCGv src)
3165 {
3166     TCGv tmp = tcg_temp_new();
3167 
3168     tcg_gen_ext8u_tl(tmp, src);
3169     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3170     /* End TB to notice changed ASI. */
3171     dc->base.is_jmp = DISAS_EXIT;
3172 }
3173 
3174 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3175 
3176 static void do_wrfprs(DisasContext *dc, TCGv src)
3177 {
3178 #ifdef TARGET_SPARC64
3179     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3180     dc->fprs_dirty = 0;
3181     dc->base.is_jmp = DISAS_EXIT;
3182 #else
3183     qemu_build_not_reached();
3184 #endif
3185 }
3186 
3187 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3188 
3189 static void do_wrgsr(DisasContext *dc, TCGv src)
3190 {
3191     gen_trap_ifnofpu(dc);
3192     tcg_gen_mov_tl(cpu_gsr, src);
3193 }
3194 
3195 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3196 
3197 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3198 {
3199     gen_helper_set_softint(tcg_env, src);
3200 }
3201 
3202 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3203 
3204 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3205 {
3206     gen_helper_clear_softint(tcg_env, src);
3207 }
3208 
3209 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3210 
3211 static void do_wrsoftint(DisasContext *dc, TCGv src)
3212 {
3213     gen_helper_write_softint(tcg_env, src);
3214 }
3215 
3216 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3217 
3218 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3219 {
3220     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3221 
3222     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3223     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3224     translator_io_start(&dc->base);
3225     gen_helper_tick_set_limit(r_tickptr, src);
3226     /* End TB to handle timer interrupt */
3227     dc->base.is_jmp = DISAS_EXIT;
3228 }
3229 
3230 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3231 
3232 static void do_wrstick(DisasContext *dc, TCGv src)
3233 {
3234 #ifdef TARGET_SPARC64
3235     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3236 
3237     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3238     translator_io_start(&dc->base);
3239     gen_helper_tick_set_count(r_tickptr, src);
3240     /* End TB to handle timer interrupt */
3241     dc->base.is_jmp = DISAS_EXIT;
3242 #else
3243     qemu_build_not_reached();
3244 #endif
3245 }
3246 
3247 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3248 
3249 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3250 {
3251     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3252 
3253     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3254     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3255     translator_io_start(&dc->base);
3256     gen_helper_tick_set_limit(r_tickptr, src);
3257     /* End TB to handle timer interrupt */
3258     dc->base.is_jmp = DISAS_EXIT;
3259 }
3260 
3261 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3262 
3263 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3264 {
3265     finishing_insn(dc);
3266     save_state(dc);
3267     gen_helper_power_down(tcg_env);
3268 }
3269 
3270 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3271 
3272 static void do_wrpsr(DisasContext *dc, TCGv src)
3273 {
3274     gen_helper_wrpsr(tcg_env, src);
3275     dc->base.is_jmp = DISAS_EXIT;
3276 }
3277 
3278 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3279 
3280 static void do_wrwim(DisasContext *dc, TCGv src)
3281 {
3282     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
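    /* WIM implements one bit per register window, so e.g. an 8-window
       cpu keeps only the low 8 bits writable.  */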
3283     TCGv tmp = tcg_temp_new();
3284 
3285     tcg_gen_andi_tl(tmp, src, mask);
3286     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3287 }
3288 
3289 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3290 
3291 static void do_wrtpc(DisasContext *dc, TCGv src)
3292 {
3293 #ifdef TARGET_SPARC64
3294     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3295 
3296     gen_load_trap_state_at_tl(r_tsptr);
3297     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3298 #else
3299     qemu_build_not_reached();
3300 #endif
3301 }
3302 
3303 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3304 
3305 static void do_wrtnpc(DisasContext *dc, TCGv src)
3306 {
3307 #ifdef TARGET_SPARC64
3308     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3309 
3310     gen_load_trap_state_at_tl(r_tsptr);
3311     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3312 #else
3313     qemu_build_not_reached();
3314 #endif
3315 }
3316 
3317 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3318 
3319 static void do_wrtstate(DisasContext *dc, TCGv src)
3320 {
3321 #ifdef TARGET_SPARC64
3322     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3323 
3324     gen_load_trap_state_at_tl(r_tsptr);
3325     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3326 #else
3327     qemu_build_not_reached();
3328 #endif
3329 }
3330 
3331 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3332 
3333 static void do_wrtt(DisasContext *dc, TCGv src)
3334 {
3335 #ifdef TARGET_SPARC64
3336     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3337 
3338     gen_load_trap_state_at_tl(r_tsptr);
3339     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3340 #else
3341     qemu_build_not_reached();
3342 #endif
3343 }
3344 
3345 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3346 
3347 static void do_wrtick(DisasContext *dc, TCGv src)
3348 {
3349     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3350 
3351     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3352     translator_io_start(&dc->base);
3353     gen_helper_tick_set_count(r_tickptr, src);
3354     /* End TB to handle timer interrupt */
3355     dc->base.is_jmp = DISAS_EXIT;
3356 }
3357 
3358 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3359 
3360 static void do_wrtba(DisasContext *dc, TCGv src)
3361 {
3362     tcg_gen_mov_tl(cpu_tbr, src);
3363 }
3364 
3365 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3366 
3367 static void do_wrpstate(DisasContext *dc, TCGv src)
3368 {
3369     save_state(dc);
3370     if (translator_io_start(&dc->base)) {
3371         dc->base.is_jmp = DISAS_EXIT;
3372     }
3373     gen_helper_wrpstate(tcg_env, src);
3374     dc->npc = DYNAMIC_PC;
3375 }
3376 
3377 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3378 
3379 static void do_wrtl(DisasContext *dc, TCGv src)
3380 {
3381     save_state(dc);
3382     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3383     dc->npc = DYNAMIC_PC;
3384 }
3385 
3386 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3387 
3388 static void do_wrpil(DisasContext *dc, TCGv src)
3389 {
3390     if (translator_io_start(&dc->base)) {
3391         dc->base.is_jmp = DISAS_EXIT;
3392     }
3393     gen_helper_wrpil(tcg_env, src);
3394 }
3395 
3396 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3397 
3398 static void do_wrcwp(DisasContext *dc, TCGv src)
3399 {
3400     gen_helper_wrcwp(tcg_env, src);
3401 }
3402 
3403 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3404 
3405 static void do_wrcansave(DisasContext *dc, TCGv src)
3406 {
3407     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3408 }
3409 
3410 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3411 
3412 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3413 {
3414     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3415 }
3416 
3417 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3418 
3419 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3420 {
3421     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3422 }
3423 
3424 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3425 
3426 static void do_wrotherwin(DisasContext *dc, TCGv src)
3427 {
3428     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3429 }
3430 
3431 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3432 
3433 static void do_wrwstate(DisasContext *dc, TCGv src)
3434 {
3435     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3436 }
3437 
3438 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3439 
3440 static void do_wrgl(DisasContext *dc, TCGv src)
3441 {
3442     gen_helper_wrgl(tcg_env, src);
3443 }
3444 
3445 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3446 
3447 /* UA2005 strand status */
3448 static void do_wrssr(DisasContext *dc, TCGv src)
3449 {
3450     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3451 }
3452 
3453 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3454 
3455 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3456 
3457 static void do_wrhpstate(DisasContext *dc, TCGv src)
3458 {
3459     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3460     dc->base.is_jmp = DISAS_EXIT;
3461 }
3462 
3463 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3464 
3465 static void do_wrhtstate(DisasContext *dc, TCGv src)
3466 {
3467     TCGv_i32 tl = tcg_temp_new_i32();
3468     TCGv_ptr tp = tcg_temp_new_ptr();
3469 
3470     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3471     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3472     tcg_gen_shli_i32(tl, tl, 3);
3473     tcg_gen_ext_i32_ptr(tp, tl);
3474     tcg_gen_add_ptr(tp, tp, tcg_env);
3475 
3476     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3477 }
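/*
 * The store above indexes env->htstate[TL]: TL is loaded, clamped with
 * MAXTL_MASK, and scaled by 8 (each entry is a 64-bit word, hence the
 * shift by 3); the scaled index is added to the env pointer so the
 * final store lands at htstate's base offset plus TL * 8.
 */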
3478 
3479 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3480 
3481 static void do_wrhintp(DisasContext *dc, TCGv src)
3482 {
3483     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3484 }
3485 
3486 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3487 
3488 static void do_wrhtba(DisasContext *dc, TCGv src)
3489 {
3490     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3491 }
3492 
3493 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3494 
3495 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3496 {
3497     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3498 
3499     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3500     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3501     translator_io_start(&dc->base);
3502     gen_helper_tick_set_limit(r_tickptr, src);
3503     /* End TB to handle timer interrupt */
3504     dc->base.is_jmp = DISAS_EXIT;
3505 }
3506 
3507 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3508       do_wrhstick_cmpr)
3509 
3510 static bool do_saved_restored(DisasContext *dc, bool saved)
3511 {
3512     if (!supervisor(dc)) {
3513         return raise_priv(dc);
3514     }
3515     if (saved) {
3516         gen_helper_saved(tcg_env);
3517     } else {
3518         gen_helper_restored(tcg_env);
3519     }
3520     return advance_pc(dc);
3521 }
3522 
3523 TRANS(SAVED, 64, do_saved_restored, true)
3524 TRANS(RESTORED, 64, do_saved_restored, false)
3525 
3526 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3527 {
3528     return advance_pc(dc);
3529 }
3530 
3531 /*
3532  * TODO: Need a feature bit for sparcv8.
3533  * In the meantime, treat all 32-bit cpus like sparcv7.
3534  */
3535 TRANS(NOP_v7, 32, trans_NOP, a)
3536 TRANS(NOP_v9, 64, trans_NOP, a)
3537 
3538 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3539                          void (*func)(TCGv, TCGv, TCGv),
3540                          void (*funci)(TCGv, TCGv, target_long),
3541                          bool logic_cc)
3542 {
3543     TCGv dst, src1;
3544 
3545     /* For simplicity, we under-decoded the rs2 form. */
3546     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3547         return false;
3548     }
3549 
3550     if (logic_cc) {
3551         dst = cpu_cc_N;
3552     } else {
3553         dst = gen_dest_gpr(dc, a->rd);
3554     }
3555     src1 = gen_load_gpr(dc, a->rs1);
3556 
3557     if (a->imm || a->rs2_or_imm == 0) {
3558         if (funci) {
3559             funci(dst, src1, a->rs2_or_imm);
3560         } else {
3561             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3562         }
3563     } else {
3564         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3565     }
3566 
3567     if (logic_cc) {
3568         if (TARGET_LONG_BITS == 64) {
3569             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3570             tcg_gen_movi_tl(cpu_icc_C, 0);
3571         }
3572         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3573         tcg_gen_movi_tl(cpu_cc_C, 0);
3574         tcg_gen_movi_tl(cpu_cc_V, 0);
3575     }
3576 
3577     gen_store_gpr(dc, a->rd, dst);
3578     return advance_pc(dc);
3579 }
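/*
 * A note on the logic_cc path above: the result of a logical CC op is
 * computed straight into cpu_cc_N, which then serves both as the value
 * stored to rd and as the N flag source.  Z is tracked as a value that
 * is zero exactly when the flag is set, so it can simply copy the
 * result; C and V are always clear for logical ops, and the sparc64
 * build mirrors the same state into the 32-bit icc copies.
 */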
3580 
3581 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3582                      void (*func)(TCGv, TCGv, TCGv),
3583                      void (*funci)(TCGv, TCGv, target_long),
3584                      void (*func_cc)(TCGv, TCGv, TCGv))
3585 {
3586     if (a->cc) {
3587         return do_arith_int(dc, a, func_cc, NULL, false);
3588     }
3589     return do_arith_int(dc, a, func, funci, false);
3590 }
3591 
3592 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3593                      void (*func)(TCGv, TCGv, TCGv),
3594                      void (*funci)(TCGv, TCGv, target_long))
3595 {
3596     return do_arith_int(dc, a, func, funci, a->cc);
3597 }
3598 
3599 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3600 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3601 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3602 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3603 
3604 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3605 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3606 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3607 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3608 
3609 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3610 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3611 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3612 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3613 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3614 
3615 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3616 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3617 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3618 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3619 
3620 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3621 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3622 
3623 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3624 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3625 
3626 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3627 {
3628     /* OR with %g0 is the canonical alias for MOV. */
3629     if (!a->cc && a->rs1 == 0) {
3630         if (a->imm || a->rs2_or_imm == 0) {
3631             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3632         } else if (a->rs2_or_imm & ~0x1f) {
3633             /* For simplicity, we under-decoded the rs2 form. */
3634             return false;
3635         } else {
3636             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3637         }
3638         return advance_pc(dc);
3639     }
3640     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3641 }
3642 
3643 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3644 {
3645     TCGv_i64 t1, t2;
3646     TCGv dst;
3647 
3648     if (!avail_DIV(dc)) {
3649         return false;
3650     }
3651     /* For simplicity, we under-decoded the rs2 form. */
3652     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3653         return false;
3654     }
3655 
3656     if (unlikely(a->rs2_or_imm == 0)) {
3657         gen_exception(dc, TT_DIV_ZERO);
3658         return true;
3659     }
3660 
3661     if (a->imm) {
3662         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3663     } else {
3664         TCGLabel *lab;
3665         TCGv_i32 n2;
3666 
3667         finishing_insn(dc);
3668         flush_cond(dc);
3669 
3670         n2 = tcg_temp_new_i32();
3671         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3672 
3673         lab = delay_exception(dc, TT_DIV_ZERO);
3674         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3675 
3676         t2 = tcg_temp_new_i64();
3677 #ifdef TARGET_SPARC64
3678         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3679 #else
3680         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3681 #endif
3682     }
3683 
3684     t1 = tcg_temp_new_i64();
3685     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3686 
3687     tcg_gen_divu_i64(t1, t1, t2);
3688     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3689 
3690     dst = gen_dest_gpr(dc, a->rd);
3691     tcg_gen_trunc_i64_tl(dst, t1);
3692     gen_store_gpr(dc, a->rd, dst);
3693     return advance_pc(dc);
3694 }
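/*
 * A worked example of the expansion above, assuming %y = 1 and an rs1
 * value of 0: the 64-bit dividend is (%y << 32) | rs1 = 0x100000000.
 * Dividing by 2 gives 0x80000000, which fits in 32 bits and is stored
 * as-is; dividing by 1 gives 0x100000000, which the umin clamps to
 * 0xffffffff, the saturated overflow result SPARC V8 specifies for UDIV.
 */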
3695 
3696 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3697 {
3698     TCGv dst, src1, src2;
3699 
3700     if (!avail_64(dc)) {
3701         return false;
3702     }
3703     /* For simplicity, we under-decoded the rs2 form. */
3704     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3705         return false;
3706     }
3707 
3708     if (unlikely(a->rs2_or_imm == 0)) {
3709         gen_exception(dc, TT_DIV_ZERO);
3710         return true;
3711     }
3712 
3713     if (a->imm) {
3714         src2 = tcg_constant_tl(a->rs2_or_imm);
3715     } else {
3716         TCGLabel *lab;
3717 
3718         finishing_insn(dc);
3719         flush_cond(dc);
3720 
3721         lab = delay_exception(dc, TT_DIV_ZERO);
3722         src2 = cpu_regs[a->rs2_or_imm];
3723         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3724     }
3725 
3726     dst = gen_dest_gpr(dc, a->rd);
3727     src1 = gen_load_gpr(dc, a->rs1);
3728 
3729     tcg_gen_divu_tl(dst, src1, src2);
3730     gen_store_gpr(dc, a->rd, dst);
3731     return advance_pc(dc);
3732 }
3733 
3734 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3735 {
3736     TCGv dst, src1, src2;
3737 
3738     if (!avail_64(dc)) {
3739         return false;
3740     }
3741     /* For simplicity, we under-decoded the rs2 form. */
3742     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3743         return false;
3744     }
3745 
3746     if (unlikely(a->rs2_or_imm == 0)) {
3747         gen_exception(dc, TT_DIV_ZERO);
3748         return true;
3749     }
3750 
3751     dst = gen_dest_gpr(dc, a->rd);
3752     src1 = gen_load_gpr(dc, a->rs1);
3753 
3754     if (a->imm) {
3755         if (unlikely(a->rs2_or_imm == -1)) {
3756             tcg_gen_neg_tl(dst, src1);
3757             gen_store_gpr(dc, a->rd, dst);
3758             return advance_pc(dc);
3759         }
3760         src2 = tcg_constant_tl(a->rs2_or_imm);
3761     } else {
3762         TCGLabel *lab;
3763         TCGv t1, t2;
3764 
3765         finishing_insn(dc);
3766         flush_cond(dc);
3767 
3768         lab = delay_exception(dc, TT_DIV_ZERO);
3769         src2 = cpu_regs[a->rs2_or_imm];
3770         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3771 
3772         /*
3773          * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3774          * If so, use 1 as the divisor; INT64_MIN / 1 is the correct result.
3775          */
3776         t1 = tcg_temp_new();
3777         t2 = tcg_temp_new();
3778         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3779         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3780         tcg_gen_and_tl(t1, t1, t2);
3781         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3782                            tcg_constant_tl(1), src2);
3783         src2 = t1;
3784     }
3785 
3786     tcg_gen_div_tl(dst, src1, src2);
3787     gen_store_gpr(dc, a->rd, dst);
3788     return advance_pc(dc);
3789 }
3790 
3791 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3792                      int width, bool cc, bool little_endian)
3793 {
3794     TCGv dst, s1, s2, l, r, t, m;
3795     uint64_t amask = address_mask_i(dc, -8);
3796 
3797     dst = gen_dest_gpr(dc, a->rd);
3798     s1 = gen_load_gpr(dc, a->rs1);
3799     s2 = gen_load_gpr(dc, a->rs2);
3800 
3801     if (cc) {
3802         gen_op_subcc(cpu_cc_N, s1, s2);
3803     }
3804 
3805     l = tcg_temp_new();
3806     r = tcg_temp_new();
3807     t = tcg_temp_new();
3808 
3809     switch (width) {
3810     case 8:
3811         tcg_gen_andi_tl(l, s1, 7);
3812         tcg_gen_andi_tl(r, s2, 7);
3813         tcg_gen_xori_tl(r, r, 7);
3814         m = tcg_constant_tl(0xff);
3815         break;
3816     case 16:
3817         tcg_gen_extract_tl(l, s1, 1, 2);
3818         tcg_gen_extract_tl(r, s2, 1, 2);
3819         tcg_gen_xori_tl(r, r, 3);
3820         m = tcg_constant_tl(0xf);
3821         break;
3822     case 32:
3823         tcg_gen_extract_tl(l, s1, 2, 1);
3824         tcg_gen_extract_tl(r, s2, 2, 1);
3825         tcg_gen_xori_tl(r, r, 1);
3826         m = tcg_constant_tl(0x3);
3827         break;
3828     default:
3829         abort();
3830         g_assert_not_reached();
3831 
3832     /* Compute Left Edge */
3833     if (little_endian) {
3834         tcg_gen_shl_tl(l, m, l);
3835         tcg_gen_and_tl(l, l, m);
3836     } else {
3837         tcg_gen_shr_tl(l, m, l);
3838     }
3839     /* Compute Right Edge */
3840     if (little_endian) {
3841         tcg_gen_shr_tl(r, m, r);
3842     } else {
3843         tcg_gen_shl_tl(r, m, r);
3844         tcg_gen_and_tl(r, r, m);
3845     }
3846 
3847     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3848     tcg_gen_xor_tl(t, s1, s2);
3849     tcg_gen_and_tl(r, r, l);
3850     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3851 
3852     gen_store_gpr(dc, a->rd, dst);
3853     return advance_pc(dc);
3854 }
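/*
 * A worked example for big-endian EDGE8, assuming s1 = 6 and s2 = 11:
 * l = 0xff >> 6 = 0x03 (the last two bytes of s1's doubleword) and
 * r = (0xff << 4) & 0xff = 0xf0 (the first four bytes of s2's).  The
 * two addresses sit in different aligned doublewords, so the TSTEQ
 * condition is false and dst = l; had they shared a doubleword, the
 * result would have been l & r.
 */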
3855 
3856 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3857 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3858 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3859 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3860 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3861 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3862 
3863 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3864 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3865 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3866 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3867 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3868 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3869 
3870 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3871                    void (*func)(TCGv, TCGv, TCGv))
3872 {
3873     TCGv dst = gen_dest_gpr(dc, a->rd);
3874     TCGv src1 = gen_load_gpr(dc, a->rs1);
3875     TCGv src2 = gen_load_gpr(dc, a->rs2);
3876 
3877     func(dst, src1, src2);
3878     gen_store_gpr(dc, a->rd, dst);
3879     return advance_pc(dc);
3880 }
3881 
3882 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3883 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3884 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3885 
3886 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3887 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3888 
3889 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3890 {
3891 #ifdef TARGET_SPARC64
3892     TCGv tmp = tcg_temp_new();
3893 
3894     tcg_gen_add_tl(tmp, s1, s2);
3895     tcg_gen_andi_tl(dst, tmp, -8);
3896     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3897 #else
3898     g_assert_not_reached();
3899 #endif
3900 }
3901 
3902 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3903 {
3904 #ifdef TARGET_SPARC64
3905     TCGv tmp = tcg_temp_new();
3906 
3907     tcg_gen_add_tl(tmp, s1, s2);
3908     tcg_gen_andi_tl(dst, tmp, -8);
3909     tcg_gen_neg_tl(tmp, tmp);
3910     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3911 #else
3912     g_assert_not_reached();
3913 #endif
3914 }
3915 
3916 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3917 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
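/*
 * Both ALIGNADDR flavors above produce the 8-byte-aligned sum and
 * record the byte offset in GSR.align (the low three bits of GSR),
 * which a following FALIGNDATA uses to extract an aligned doubleword
 * spanning two loads.  The "little" variant deposits the negated
 * offset so the extraction works from the opposite end.
 */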
3918 
3919 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3920 {
3921 #ifdef TARGET_SPARC64
3922     tcg_gen_add_tl(dst, s1, s2);
3923     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3924 #else
3925     g_assert_not_reached();
3926 #endif
3927 }
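/*
 * BMASK follows the same shape: the sum lands in rd, and a copy is
 * deposited into GSR.mask (the upper 32 bits of GSR) to steer the
 * byte permutation performed by a subsequent BSHUFFLE.
 */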
3928 
3929 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3930 
3931 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
3932 {
3933     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
3934     return advance_pc(dc);
3935 }
3936 
3937 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
3938 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
3939 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
3940 
3941 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3942 {
3943     TCGv dst, src1, src2;
3944 
3945     /* Reject 64-bit shifts for sparc32. */
3946     if (avail_32(dc) && a->x) {
3947         return false;
3948     }
3949 
3950     src2 = tcg_temp_new();
3951     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3952     src1 = gen_load_gpr(dc, a->rs1);
3953     dst = gen_dest_gpr(dc, a->rd);
3954 
3955     if (l) {
3956         tcg_gen_shl_tl(dst, src1, src2);
3957         if (!a->x) {
3958             tcg_gen_ext32u_tl(dst, dst);
3959         }
3960     } else if (u) {
3961         if (!a->x) {
3962             tcg_gen_ext32u_tl(dst, src1);
3963             src1 = dst;
3964         }
3965         tcg_gen_shr_tl(dst, src1, src2);
3966     } else {
3967         if (!a->x) {
3968             tcg_gen_ext32s_tl(dst, src1);
3969             src1 = dst;
3970         }
3971         tcg_gen_sar_tl(dst, src1, src2);
3972     }
3973     gen_store_gpr(dc, a->rd, dst);
3974     return advance_pc(dc);
3975 }
3976 
3977 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3978 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3979 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3980 
3981 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3982 {
3983     TCGv dst, src1;
3984 
3985     /* Reject 64-bit shifts for sparc32. */
3986     if (avail_32(dc) && (a->x || a->i >= 32)) {
3987         return false;
3988     }
3989 
3990     src1 = gen_load_gpr(dc, a->rs1);
3991     dst = gen_dest_gpr(dc, a->rd);
3992 
3993     if (avail_32(dc) || a->x) {
3994         if (l) {
3995             tcg_gen_shli_tl(dst, src1, a->i);
3996         } else if (u) {
3997             tcg_gen_shri_tl(dst, src1, a->i);
3998         } else {
3999             tcg_gen_sari_tl(dst, src1, a->i);
4000         }
4001     } else {
4002         if (l) {
4003             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4004         } else if (u) {
4005             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4006         } else {
4007             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4008         }
4009     }
4010     gen_store_gpr(dc, a->rd, dst);
4011     return advance_pc(dc);
4012 }
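/*
 * In the 64-bit, !a->x case above, the 32-bit shift and the implicit
 * 32-bit extension fuse into a single TCG op: e.g. SRL by 8 becomes
 * extract(src, 8, 24), which shifts and zero-extends at once, SRA uses
 * the signed sextract, and SLL uses deposit_z to shift left while
 * zeroing everything above bit 31.
 */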
4013 
4014 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4015 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4016 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4017 
4018 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4019 {
4020     /* For simplicity, we under-decoded the rs2 form. */
4021     if (!imm && rs2_or_imm & ~0x1f) {
4022         return NULL;
4023     }
4024     if (imm || rs2_or_imm == 0) {
4025         return tcg_constant_tl(rs2_or_imm);
4026     } else {
4027         return cpu_regs[rs2_or_imm];
4028     }
4029 }
4030 
4031 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4032 {
4033     TCGv dst = gen_load_gpr(dc, rd);
4034     TCGv c2 = tcg_constant_tl(cmp->c2);
4035 
4036     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4037     gen_store_gpr(dc, rd, dst);
4038     return advance_pc(dc);
4039 }
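/*
 * Note that dst is loaded with gen_load_gpr rather than allocated
 * fresh: when the condition is false, movcond must reproduce the old
 * rd value, which is exactly MOVcc/MOVR's conditional-update behavior.
 */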
4040 
4041 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4042 {
4043     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4044     DisasCompare cmp;
4045 
4046     if (src2 == NULL) {
4047         return false;
4048     }
4049     gen_compare(&cmp, a->cc, a->cond, dc);
4050     return do_mov_cond(dc, &cmp, a->rd, src2);
4051 }
4052 
4053 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4054 {
4055     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4056     DisasCompare cmp;
4057 
4058     if (src2 == NULL) {
4059         return false;
4060     }
4061     gen_fcompare(&cmp, a->cc, a->cond);
4062     return do_mov_cond(dc, &cmp, a->rd, src2);
4063 }
4064 
4065 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4066 {
4067     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4068     DisasCompare cmp;
4069 
4070     if (src2 == NULL) {
4071         return false;
4072     }
4073     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4074         return false;
4075     }
4076     return do_mov_cond(dc, &cmp, a->rd, src2);
4077 }
4078 
4079 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4080                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4081 {
4082     TCGv src1, sum;
4083 
4084     /* For simplicity, we under-decoded the rs2 form. */
4085     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4086         return false;
4087     }
4088 
4089     /*
4090      * Always load the sum into a new temporary.
4091      * This is required to capture the value across a window change,
4092      * e.g. SAVE and RESTORE, where it could otherwise be optimized away.
4093      */
4094     sum = tcg_temp_new();
4095     src1 = gen_load_gpr(dc, a->rs1);
4096     if (a->imm || a->rs2_or_imm == 0) {
4097         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4098     } else {
4099         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4100     }
4101     return func(dc, a->rd, sum);
4102 }
4103 
4104 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4105 {
4106     /*
4107      * Preserve pc across advance, so that we can delay
4108      * the writeback to rd until after src is consumed.
4109      */
4110     target_ulong cur_pc = dc->pc;
4111 
4112     gen_check_align(dc, src, 3);
4113 
4114     gen_mov_pc_npc(dc);
4115     tcg_gen_mov_tl(cpu_npc, src);
4116     gen_address_mask(dc, cpu_npc);
4117     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4118 
4119     dc->npc = DYNAMIC_PC_LOOKUP;
4120     return true;
4121 }
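/*
 * The ordering above matters because rd may be the same register that
 * supplies the jump target: the link value (the JMPL's own pc) is
 * captured as a constant first and only stored once cpu_npc has
 * consumed src.  DYNAMIC_PC_LOOKUP marks npc as runtime-valued while
 * still allowing a direct TB lookup instead of a full exit to the loop.
 */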
4122 
4123 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4124 
4125 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4126 {
4127     if (!supervisor(dc)) {
4128         return raise_priv(dc);
4129     }
4130 
4131     gen_check_align(dc, src, 3);
4132 
4133     gen_mov_pc_npc(dc);
4134     tcg_gen_mov_tl(cpu_npc, src);
4135     gen_helper_rett(tcg_env);
4136 
4137     dc->npc = DYNAMIC_PC;
4138     return true;
4139 }
4140 
4141 TRANS(RETT, 32, do_add_special, a, do_rett)
4142 
4143 static bool do_return(DisasContext *dc, int rd, TCGv src)
4144 {
4145     gen_check_align(dc, src, 3);
4146     gen_helper_restore(tcg_env);
4147 
4148     gen_mov_pc_npc(dc);
4149     tcg_gen_mov_tl(cpu_npc, src);
4150     gen_address_mask(dc, cpu_npc);
4151 
4152     dc->npc = DYNAMIC_PC_LOOKUP;
4153     return true;
4154 }
4155 
4156 TRANS(RETURN, 64, do_add_special, a, do_return)
4157 
4158 static bool do_save(DisasContext *dc, int rd, TCGv src)
4159 {
4160     gen_helper_save(tcg_env);
4161     gen_store_gpr(dc, rd, src);
4162     return advance_pc(dc);
4163 }
4164 
4165 TRANS(SAVE, ALL, do_add_special, a, do_save)
4166 
4167 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4168 {
4169     gen_helper_restore(tcg_env);
4170     gen_store_gpr(dc, rd, src);
4171     return advance_pc(dc);
4172 }
4173 
4174 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4175 
4176 static bool do_done_retry(DisasContext *dc, bool done)
4177 {
4178     if (!supervisor(dc)) {
4179         return raise_priv(dc);
4180     }
4181     dc->npc = DYNAMIC_PC;
4182     dc->pc = DYNAMIC_PC;
4183     translator_io_start(&dc->base);
4184     if (done) {
4185         gen_helper_done(tcg_env);
4186     } else {
4187         gen_helper_retry(tcg_env);
4188     }
4189     return true;
4190 }
4191 
4192 TRANS(DONE, 64, do_done_retry, true)
4193 TRANS(RETRY, 64, do_done_retry, false)
4194 
4195 /*
4196  * Major opcode 11 -- load and store instructions
4197  */
4198 
4199 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4200 {
4201     TCGv addr, tmp = NULL;
4202 
4203     /* For simplicity, we under-decoded the rs2 form. */
4204     if (!imm && rs2_or_imm & ~0x1f) {
4205         return NULL;
4206     }
4207 
4208     addr = gen_load_gpr(dc, rs1);
4209     if (rs2_or_imm) {
4210         tmp = tcg_temp_new();
4211         if (imm) {
4212             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4213         } else {
4214             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4215         }
4216         addr = tmp;
4217     }
4218     if (AM_CHECK(dc)) {
4219         if (!tmp) {
4220             tmp = tcg_temp_new();
4221         }
4222         tcg_gen_ext32u_tl(tmp, addr);
4223         addr = tmp;
4224     }
4225     return addr;
4226 }
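/*
 * Every load/store form funnels through this address builder:
 * rs1 + (simm13 or rs2), with NULL signalling the under-decoded rs2
 * case to the caller, and a final 32-bit zero-extension applied when
 * the V9 PSTATE.AM address mask is in effect.
 */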
4227 
4228 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4229 {
4230     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4231     DisasASI da;
4232 
4233     if (addr == NULL) {
4234         return false;
4235     }
4236     da = resolve_asi(dc, a->asi, mop);
4237 
4238     reg = gen_dest_gpr(dc, a->rd);
4239     gen_ld_asi(dc, &da, reg, addr);
4240     gen_store_gpr(dc, a->rd, reg);
4241     return advance_pc(dc);
4242 }
4243 
4244 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4245 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4246 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4247 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4248 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4249 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4250 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4251 
4252 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4253 {
4254     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4255     DisasASI da;
4256 
4257     if (addr == NULL) {
4258         return false;
4259     }
4260     da = resolve_asi(dc, a->asi, mop);
4261 
4262     reg = gen_load_gpr(dc, a->rd);
4263     gen_st_asi(dc, &da, reg, addr);
4264     return advance_pc(dc);
4265 }
4266 
4267 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4268 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4269 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4270 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4271 
4272 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4273 {
4274     TCGv addr;
4275     DisasASI da;
4276 
4277     if (a->rd & 1) {
4278         return false;
4279     }
4280     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4281     if (addr == NULL) {
4282         return false;
4283     }
4284     da = resolve_asi(dc, a->asi, MO_TEUQ);
4285     gen_ldda_asi(dc, &da, addr, a->rd);
4286     return advance_pc(dc);
4287 }
4288 
4289 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4290 {
4291     TCGv addr;
4292     DisasASI da;
4293 
4294     if (a->rd & 1) {
4295         return false;
4296     }
4297     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4298     if (addr == NULL) {
4299         return false;
4300     }
4301     da = resolve_asi(dc, a->asi, MO_TEUQ);
4302     gen_stda_asi(dc, &da, addr, a->rd);
4303     return advance_pc(dc);
4304 }
4305 
4306 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4307 {
4308     TCGv addr, reg;
4309     DisasASI da;
4310 
4311     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4312     if (addr == NULL) {
4313         return false;
4314     }
4315     da = resolve_asi(dc, a->asi, MO_UB);
4316 
4317     reg = gen_dest_gpr(dc, a->rd);
4318     gen_ldstub_asi(dc, &da, reg, addr);
4319     gen_store_gpr(dc, a->rd, reg);
4320     return advance_pc(dc);
4321 }
4322 
4323 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4324 {
4325     TCGv addr, dst, src;
4326     DisasASI da;
4327 
4328     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4329     if (addr == NULL) {
4330         return false;
4331     }
4332     da = resolve_asi(dc, a->asi, MO_TEUL);
4333 
4334     dst = gen_dest_gpr(dc, a->rd);
4335     src = gen_load_gpr(dc, a->rd);
4336     gen_swap_asi(dc, &da, dst, src, addr);
4337     gen_store_gpr(dc, a->rd, dst);
4338     return advance_pc(dc);
4339 }
4340 
4341 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4342 {
4343     TCGv addr, o, n, c;
4344     DisasASI da;
4345 
4346     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4347     if (addr == NULL) {
4348         return false;
4349     }
4350     da = resolve_asi(dc, a->asi, mop);
4351 
4352     o = gen_dest_gpr(dc, a->rd);
4353     n = gen_load_gpr(dc, a->rd);
4354     c = gen_load_gpr(dc, a->rs2_or_imm);
4355     gen_cas_asi(dc, &da, o, n, c, addr);
4356     gen_store_gpr(dc, a->rd, o);
4357     return advance_pc(dc);
4358 }
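/*
 * Operand mapping for CASA/CASXA above: the comparand comes from rs2,
 * the value to store from rd, and the old memory word is returned in
 * rd.  The effective address is rs1 alone -- gen_ldst_addr is called
 * with a zero immediate -- since the CAS encodings carry no offset.
 */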
4359 
4360 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4361 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4362 
4363 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4364 {
4365     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4366     DisasASI da;
4367 
4368     if (addr == NULL) {
4369         return false;
4370     }
4371     if (gen_trap_ifnofpu(dc)) {
4372         return true;
4373     }
4374     if (sz == MO_128 && gen_trap_float128(dc)) {
4375         return true;
4376     }
4377     da = resolve_asi(dc, a->asi, MO_TE | sz);
4378     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4379     gen_update_fprs_dirty(dc, a->rd);
4380     return advance_pc(dc);
4381 }
4382 
4383 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4384 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4385 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4386 
4387 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4388 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4389 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4390 
4391 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4392 {
4393     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4394     DisasASI da;
4395 
4396     if (addr == NULL) {
4397         return false;
4398     }
4399     if (gen_trap_ifnofpu(dc)) {
4400         return true;
4401     }
4402     if (sz == MO_128 && gen_trap_float128(dc)) {
4403         return true;
4404     }
4405     da = resolve_asi(dc, a->asi, MO_TE | sz);
4406     gen_stf_asi(dc, &da, sz, addr, a->rd);
4407     return advance_pc(dc);
4408 }
4409 
4410 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4411 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4412 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4413 
4414 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4415 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4416 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4417 
4418 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4419 {
4420     if (!avail_32(dc)) {
4421         return false;
4422     }
4423     if (!supervisor(dc)) {
4424         return raise_priv(dc);
4425     }
4426     if (gen_trap_ifnofpu(dc)) {
4427         return true;
4428     }
4429     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4430     return true;
4431 }
4432 
4433 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4434 {
4435     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4436     TCGv_i32 tmp;
4437 
4438     if (addr == NULL) {
4439         return false;
4440     }
4441     if (gen_trap_ifnofpu(dc)) {
4442         return true;
4443     }
4444 
4445     tmp = tcg_temp_new_i32();
4446     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4447 
4448     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4449     /* LDFSR does not change FCC[1-3]. */
4450 
4451     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4452     return advance_pc(dc);
4453 }
4454 
4455 static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
4456 {
4457 #ifdef TARGET_SPARC64
4458     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4459     TCGv_i64 t64;
4460     TCGv_i32 lo, hi;
4461 
4462     if (addr == NULL) {
4463         return false;
4464     }
4465     if (gen_trap_ifnofpu(dc)) {
4466         return true;
4467     }
4468 
4469     t64 = tcg_temp_new_i64();
4470     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4471 
4472     lo = tcg_temp_new_i32();
4473     hi = cpu_fcc[3];
4474     tcg_gen_extr_i64_i32(lo, hi, t64);
4475     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4476     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4477     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4478     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4479 
4480     gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4481     return advance_pc(dc);
4482 #else
4483     return false;
4484 #endif
4485 }
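/*
 * LDXFSR scatters the 64-bit FSR image across the tracked pieces:
 * fcc0 lives in the low word, fcc1-3 in the high word (hence the
 * SHIFT - 32 adjustments).  cpu_fcc[3] doubles as scratch for the
 * high half, which is safe because its own field is extracted last.
 */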
4486 
4487 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4488 {
4489     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4490     TCGv fsr;
4491 
4492     if (addr == NULL) {
4493         return false;
4494     }
4495     if (gen_trap_ifnofpu(dc)) {
4496         return true;
4497     }
4498 
4499     fsr = tcg_temp_new();
4500     gen_helper_get_fsr(fsr, tcg_env);
4501     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4502     return advance_pc(dc);
4503 }
4504 
4505 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4506 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4507 
4508 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4509 {
4510     if (gen_trap_ifnofpu(dc)) {
4511         return true;
4512     }
4513     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4514     return advance_pc(dc);
4515 }
4516 
4517 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4518 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4519 
4520 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4521 {
4522     if (gen_trap_ifnofpu(dc)) {
4523         return true;
4524     }
4525     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4526     return advance_pc(dc);
4527 }
4528 
4529 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4530 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4531 
4532 static bool do_ff(DisasContext *dc, arg_r_r *a,
4533                   void (*func)(TCGv_i32, TCGv_i32))
4534 {
4535     TCGv_i32 tmp;
4536 
4537     if (gen_trap_ifnofpu(dc)) {
4538         return true;
4539     }
4540 
4541     tmp = gen_load_fpr_F(dc, a->rs);
4542     func(tmp, tmp);
4543     gen_store_fpr_F(dc, a->rd, tmp);
4544     return advance_pc(dc);
4545 }
4546 
4547 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4548 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4549 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4550 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4551 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4552 
4553 static bool do_fd(DisasContext *dc, arg_r_r *a,
4554                   void (*func)(TCGv_i32, TCGv_i64))
4555 {
4556     TCGv_i32 dst;
4557     TCGv_i64 src;
4558 
4559     if (gen_trap_ifnofpu(dc)) {
4560         return true;
4561     }
4562 
4563     dst = tcg_temp_new_i32();
4564     src = gen_load_fpr_D(dc, a->rs);
4565     func(dst, src);
4566     gen_store_fpr_F(dc, a->rd, dst);
4567     return advance_pc(dc);
4568 }
4569 
4570 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4571 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4572 
4573 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4574                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4575 {
4576     TCGv_i32 tmp;
4577 
4578     if (gen_trap_ifnofpu(dc)) {
4579         return true;
4580     }
4581 
4582     tmp = gen_load_fpr_F(dc, a->rs);
4583     func(tmp, tcg_env, tmp);
4584     gen_store_fpr_F(dc, a->rd, tmp);
4585     return advance_pc(dc);
4586 }
4587 
4588 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4589 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4590 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4591 
4592 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4593                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4594 {
4595     TCGv_i32 dst;
4596     TCGv_i64 src;
4597 
4598     if (gen_trap_ifnofpu(dc)) {
4599         return true;
4600     }
4601 
4602     dst = tcg_temp_new_i32();
4603     src = gen_load_fpr_D(dc, a->rs);
4604     func(dst, tcg_env, src);
4605     gen_store_fpr_F(dc, a->rd, dst);
4606     return advance_pc(dc);
4607 }
4608 
4609 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4610 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4611 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4612 
4613 static bool do_dd(DisasContext *dc, arg_r_r *a,
4614                   void (*func)(TCGv_i64, TCGv_i64))
4615 {
4616     TCGv_i64 dst, src;
4617 
4618     if (gen_trap_ifnofpu(dc)) {
4619         return true;
4620     }
4621 
4622     dst = tcg_temp_new_i64();
4623     src = gen_load_fpr_D(dc, a->rs);
4624     func(dst, src);
4625     gen_store_fpr_D(dc, a->rd, dst);
4626     return advance_pc(dc);
4627 }
4628 
4629 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4630 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4631 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4632 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4633 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4634 
4635 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4636                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4637 {
4638     TCGv_i64 dst, src;
4639 
4640     if (gen_trap_ifnofpu(dc)) {
4641         return true;
4642     }
4643 
4644     dst = tcg_temp_new_i64();
4645     src = gen_load_fpr_D(dc, a->rs);
4646     func(dst, tcg_env, src);
4647     gen_store_fpr_D(dc, a->rd, dst);
4648     return advance_pc(dc);
4649 }
4650 
4651 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4652 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4653 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4654 
4655 static bool do_df(DisasContext *dc, arg_r_r *a,
4656                   void (*func)(TCGv_i64, TCGv_i32))
4657 {
4658     TCGv_i64 dst;
4659     TCGv_i32 src;
4660 
4661     if (gen_trap_ifnofpu(dc)) {
4662         return true;
4663     }
4664 
4665     dst = tcg_temp_new_i64();
4666     src = gen_load_fpr_F(dc, a->rs);
4667     func(dst, src);
4668     gen_store_fpr_D(dc, a->rd, dst);
4669     return advance_pc(dc);
4670 }
4671 
4672 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4673 
4674 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4675                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4676 {
4677     TCGv_i64 dst;
4678     TCGv_i32 src;
4679 
4680     if (gen_trap_ifnofpu(dc)) {
4681         return true;
4682     }
4683 
4684     dst = tcg_temp_new_i64();
4685     src = gen_load_fpr_F(dc, a->rs);
4686     func(dst, tcg_env, src);
4687     gen_store_fpr_D(dc, a->rd, dst);
4688     return advance_pc(dc);
4689 }
4690 
4691 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4692 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4693 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4694 
4695 static bool do_qq(DisasContext *dc, arg_r_r *a,
4696                   void (*func)(TCGv_i128, TCGv_i128))
4697 {
4698     TCGv_i128 t;
4699 
4700     if (gen_trap_ifnofpu(dc)) {
4701         return true;
4702     }
4703     if (gen_trap_float128(dc)) {
4704         return true;
4705     }
4706 
4707     gen_op_clear_ieee_excp_and_FTT();
4708     t = gen_load_fpr_Q(dc, a->rs);
4709     func(t, t);
4710     gen_store_fpr_Q(dc, a->rd, t);
4711     return advance_pc(dc);
4712 }
4713 
4714 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4715 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4716 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4717 
4718 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4719                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4720 {
4721     TCGv_i128 t;
4722 
4723     if (gen_trap_ifnofpu(dc)) {
4724         return true;
4725     }
4726     if (gen_trap_float128(dc)) {
4727         return true;
4728     }
4729 
4730     t = gen_load_fpr_Q(dc, a->rs);
4731     func(t, tcg_env, t);
4732     gen_store_fpr_Q(dc, a->rd, t);
4733     return advance_pc(dc);
4734 }
4735 
4736 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4737 
4738 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4739                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4740 {
4741     TCGv_i128 src;
4742     TCGv_i32 dst;
4743 
4744     if (gen_trap_ifnofpu(dc)) {
4745         return true;
4746     }
4747     if (gen_trap_float128(dc)) {
4748         return true;
4749     }
4750 
4751     src = gen_load_fpr_Q(dc, a->rs);
4752     dst = tcg_temp_new_i32();
4753     func(dst, tcg_env, src);
4754     gen_store_fpr_F(dc, a->rd, dst);
4755     return advance_pc(dc);
4756 }
4757 
4758 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4759 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4760 
4761 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4762                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4763 {
4764     TCGv_i128 src;
4765     TCGv_i64 dst;
4766 
4767     if (gen_trap_ifnofpu(dc)) {
4768         return true;
4769     }
4770     if (gen_trap_float128(dc)) {
4771         return true;
4772     }
4773 
4774     src = gen_load_fpr_Q(dc, a->rs);
4775     dst = tcg_temp_new_i64();
4776     func(dst, tcg_env, src);
4777     gen_store_fpr_D(dc, a->rd, dst);
4778     return advance_pc(dc);
4779 }
4780 
4781 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4782 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4783 
4784 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4785                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4786 {
4787     TCGv_i32 src;
4788     TCGv_i128 dst;
4789 
4790     if (gen_trap_ifnofpu(dc)) {
4791         return true;
4792     }
4793     if (gen_trap_float128(dc)) {
4794         return true;
4795     }
4796 
4797     src = gen_load_fpr_F(dc, a->rs);
4798     dst = tcg_temp_new_i128();
4799     func(dst, tcg_env, src);
4800     gen_store_fpr_Q(dc, a->rd, dst);
4801     return advance_pc(dc);
4802 }
4803 
4804 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4805 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4806 
4807 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4808                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4809 {
4810     TCGv_i64 src;
4811     TCGv_i128 dst;
4812 
4813     if (gen_trap_ifnofpu(dc)) {
4814         return true;
4815     }
4816     if (gen_trap_float128(dc)) {
4817         return true;
4818     }
4819 
4820     src = gen_load_fpr_D(dc, a->rs);
4821     dst = tcg_temp_new_i128();
4822     func(dst, tcg_env, src);
4823     gen_store_fpr_Q(dc, a->rd, dst);
4824     return advance_pc(dc);
4825 }
4826 
4827 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4828 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4829 
4830 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4831                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4832 {
4833     TCGv_i32 src1, src2;
4834 
4835     if (gen_trap_ifnofpu(dc)) {
4836         return true;
4837     }
4838 
4839     src1 = gen_load_fpr_F(dc, a->rs1);
4840     src2 = gen_load_fpr_F(dc, a->rs2);
4841     func(src1, src1, src2);
4842     gen_store_fpr_F(dc, a->rd, src1);
4843     return advance_pc(dc);
4844 }
4845 
4846 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4847 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4848 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4849 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4850 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4851 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4852 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4853 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4854 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4855 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4856 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4857 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4858 
4859 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4860 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4861 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4862 
4863 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4864 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4865 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4866 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4867 
4868 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4869                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4870 {
4871     TCGv_i32 src1, src2;
4872 
4873     if (gen_trap_ifnofpu(dc)) {
4874         return true;
4875     }
4876 
4877     src1 = gen_load_fpr_F(dc, a->rs1);
4878     src2 = gen_load_fpr_F(dc, a->rs2);
4879     func(src1, tcg_env, src1, src2);
4880     gen_store_fpr_F(dc, a->rd, src1);
4881     return advance_pc(dc);
4882 }
4883 
4884 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4885 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4886 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4887 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4888 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
4889 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4890 
4891 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4892                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4893 {
4894     TCGv_i64 dst;
4895     TCGv_i32 src1, src2;
4896 
4897     if (gen_trap_ifnofpu(dc)) {
4898         return true;
4899     }
4900 
4901     dst = tcg_temp_new_i64();
4902     src1 = gen_load_fpr_F(dc, a->rs1);
4903     src2 = gen_load_fpr_F(dc, a->rs2);
4904     func(dst, src1, src2);
4905     gen_store_fpr_D(dc, a->rd, dst);
4906     return advance_pc(dc);
4907 }
4908 
4909 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
4910 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
4911 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
4912 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
4913 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4914 
4915 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4916                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4917 {
4918     TCGv_i64 dst, src2;
4919     TCGv_i32 src1;
4920 
4921     if (gen_trap_ifnofpu(dc)) {
4922         return true;
4923     }
4924 
4925     dst = tcg_temp_new_i64();
4926     src1 = gen_load_fpr_F(dc, a->rs1);
4927     src2 = gen_load_fpr_D(dc, a->rs2);
4928     func(dst, src1, src2);
4929     gen_store_fpr_D(dc, a->rd, dst);
4930     return advance_pc(dc);
4931 }
4932 
4933 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4934 
4935 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
4936                         void (*func)(unsigned, uint32_t, uint32_t,
4937                                      uint32_t, uint32_t, uint32_t))
4938 {
4939     if (gen_trap_ifnofpu(dc)) {
4940         return true;
4941     }
4942 
4943     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
4944          gen_offset_fpr_D(a->rs2), 8, 8);
4945     return advance_pc(dc);
4946 }
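/*
 * The trailing "8, 8" are gvec's oprsz/maxsz: VIS operates on a single
 * 64-bit register, so each expansion covers exactly eight bytes --
 * e.g. FPADD16 below is four independent 16-bit lane additions.
 */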
4947 
4948 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
4949 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
4950 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
4951 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
4952 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
4953 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
4954 
4955 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
4956 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
4957 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
4958 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
4959 
4960 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4961                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4962 {
4963     TCGv_i64 dst, src1, src2;
4964 
4965     if (gen_trap_ifnofpu(dc)) {
4966         return true;
4967     }
4968 
4969     dst = tcg_temp_new_i64();
4970     src1 = gen_load_fpr_D(dc, a->rs1);
4971     src2 = gen_load_fpr_D(dc, a->rs2);
4972     func(dst, src1, src2);
4973     gen_store_fpr_D(dc, a->rd, dst);
4974     return advance_pc(dc);
4975 }
4976 
4977 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4978 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4979 
4980 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4981 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4982 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4983 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4984 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4985 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4986 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4987 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4988 
4989 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4990 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4991 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4992 
4993 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
4994 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
4995 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
4996 
4997 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
4998 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
4999 
5000 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5001                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5002 {
5003     TCGv_i64 src1, src2;
5004     TCGv dst;
5005 
5006     if (gen_trap_ifnofpu(dc)) {
5007         return true;
5008     }
5009 
5010     dst = gen_dest_gpr(dc, a->rd);
5011     src1 = gen_load_fpr_D(dc, a->rs1);
5012     src2 = gen_load_fpr_D(dc, a->rs2);
5013     func(dst, src1, src2);
5014     gen_store_gpr(dc, a->rd, dst);
5015     return advance_pc(dc);
5016 }
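/*
 * The FPCMP family compares lanes of two double-precision-sized fp
 * registers but delivers the per-lane results as a bitmask in an
 * integer register, hence the gpr destination in an otherwise
 * fp-shaped helper.
 */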
5017 
5018 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5019 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5020 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5021 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5022 
5023 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5024 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5025 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5026 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5027 
5028 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5029                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5030 {
5031     TCGv_i64 dst, src1, src2;
5032 
5033     if (gen_trap_ifnofpu(dc)) {
5034         return true;
5035     }
5036 
5037     dst = tcg_temp_new_i64();
5038     src1 = gen_load_fpr_D(dc, a->rs1);
5039     src2 = gen_load_fpr_D(dc, a->rs2);
5040     func(dst, tcg_env, src1, src2);
5041     gen_store_fpr_D(dc, a->rd, dst);
5042     return advance_pc(dc);
5043 }
5044 
5045 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5046 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5047 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5048 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5049 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5050 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5051 
5052 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5053 {
5054     TCGv_i64 dst;
5055     TCGv_i32 src1, src2;
5056 
5057     if (gen_trap_ifnofpu(dc)) {
5058         return true;
5059     }
5060     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5061         return raise_unimpfpop(dc);
5062     }
5063 
5064     dst = tcg_temp_new_i64();
5065     src1 = gen_load_fpr_F(dc, a->rs1);
5066     src2 = gen_load_fpr_F(dc, a->rs2);
5067     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5068     gen_store_fpr_D(dc, a->rd, dst);
5069     return advance_pc(dc);
5070 }
5071 
5072 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5073 {
5074     TCGv_i64 dst;
5075     TCGv_i32 src1, src2;
5076 
5077     if (!avail_VIS3(dc)) {
5078         return false;
5079     }
5080     if (gen_trap_ifnofpu(dc)) {
5081         return true;
5082     }
5083     dst = tcg_temp_new_i64();
5084     src1 = gen_load_fpr_F(dc, a->rs1);
5085     src2 = gen_load_fpr_F(dc, a->rs2);
5086     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5087     gen_store_fpr_D(dc, a->rd, dst);
5088     return advance_pc(dc);
5089 }
5090 
5091 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5092                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5093 {
5094     TCGv_i32 dst, src1, src2, src3;
5095 
5096     if (gen_trap_ifnofpu(dc)) {
5097         return true;
5098     }
5099 
5100     src1 = gen_load_fpr_F(dc, a->rs1);
5101     src2 = gen_load_fpr_F(dc, a->rs2);
5102     src3 = gen_load_fpr_F(dc, a->rs3);
5103     dst = tcg_temp_new_i32();
5104     func(dst, src1, src2, src3);
5105     gen_store_fpr_F(dc, a->rd, dst);
5106     return advance_pc(dc);
5107 }
5108 
5109 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5110 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5111 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5112 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5113 
5114 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5115                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5116 {
5117     TCGv_i64 dst, src1, src2, src3;
5118 
5119     if (gen_trap_ifnofpu(dc)) {
5120         return true;
5121     }
5122 
5123     dst  = tcg_temp_new_i64();
5124     src1 = gen_load_fpr_D(dc, a->rs1);
5125     src2 = gen_load_fpr_D(dc, a->rs2);
5126     src3 = gen_load_fpr_D(dc, a->rs3);
5127     func(dst, src1, src2, src3);
5128     gen_store_fpr_D(dc, a->rd, dst);
5129     return advance_pc(dc);
5130 }
5131 
5132 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5133 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5134 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5135 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5136 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5137 
5138 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5139                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5140 {
5141     TCGv_i128 src1, src2;
5142 
5143     if (gen_trap_ifnofpu(dc)) {
5144         return true;
5145     }
5146     if (gen_trap_float128(dc)) {
5147         return true;
5148     }
5149 
5150     src1 = gen_load_fpr_Q(dc, a->rs1);
5151     src2 = gen_load_fpr_Q(dc, a->rs2);
5152     func(src1, tcg_env, src1, src2);
5153     gen_store_fpr_Q(dc, a->rd, src1);
5154     return advance_pc(dc);
5155 }
5156 
5157 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5158 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5159 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5160 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5161 
5162 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5163 {
5164     TCGv_i64 src1, src2;
5165     TCGv_i128 dst;
5166 
5167     if (gen_trap_ifnofpu(dc)) {
5168         return true;
5169     }
5170     if (gen_trap_float128(dc)) {
5171         return true;
5172     }
5173 
5174     src1 = gen_load_fpr_D(dc, a->rs1);
5175     src2 = gen_load_fpr_D(dc, a->rs2);
5176     dst = tcg_temp_new_i128();
5177     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5178     gen_store_fpr_Q(dc, a->rd, dst);
5179     return advance_pc(dc);
5180 }
5181 
5182 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5183                      void (*func)(DisasContext *, DisasCompare *, int, int))
5184 {
5185     DisasCompare cmp;
5186 
5187     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5188         return false;
5189     }
5190     if (gen_trap_ifnofpu(dc)) {
5191         return true;
5192     }
5193     if (is_128 && gen_trap_float128(dc)) {
5194         return true;
5195     }
5196 
5197     gen_op_clear_ieee_excp_and_FTT();
5198     func(dc, &cmp, a->rd, a->rs2);
5199     return advance_pc(dc);
5200 }
5201 
5202 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5203 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5204 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5205 
5206 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5207                       void (*func)(DisasContext *, DisasCompare *, int, int))
5208 {
5209     DisasCompare cmp;
5210 
5211     if (gen_trap_ifnofpu(dc)) {
5212         return true;
5213     }
5214     if (is_128 && gen_trap_float128(dc)) {
5215         return true;
5216     }
5217 
5218     gen_op_clear_ieee_excp_and_FTT();
5219     gen_compare(&cmp, a->cc, a->cond, dc);
5220     func(dc, &cmp, a->rd, a->rs2);
5221     return advance_pc(dc);
5222 }
5223 
5224 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5225 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5226 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5227 
5228 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5229                        void (*func)(DisasContext *, DisasCompare *, int, int))
5230 {
5231     DisasCompare cmp;
5232 
5233     if (gen_trap_ifnofpu(dc)) {
5234         return true;
5235     }
5236     if (is_128 && gen_trap_float128(dc)) {
5237         return true;
5238     }
5239 
5240     gen_op_clear_ieee_excp_and_FTT();
5241     gen_fcompare(&cmp, a->cc, a->cond);
5242     func(dc, &cmp, a->rd, a->rs2);
5243     return advance_pc(dc);
5244 }
5245 
5246 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5247 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5248 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5249 
5250 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5251 {
5252     TCGv_i32 src1, src2;
5253 
5254     if (avail_32(dc) && a->cc != 0) {
5255         return false;
5256     }
5257     if (gen_trap_ifnofpu(dc)) {
5258         return true;
5259     }
5260 
5261     src1 = gen_load_fpr_F(dc, a->rs1);
5262     src2 = gen_load_fpr_F(dc, a->rs2);
5263     if (e) {
5264         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5265     } else {
5266         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5267     }
5268     return advance_pc(dc);
5269 }
5270 
5271 TRANS(FCMPs, ALL, do_fcmps, a, false)
5272 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5273 
5274 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5275 {
5276     TCGv_i64 src1, src2;
5277 
5278     if (avail_32(dc) && a->cc != 0) {
5279         return false;
5280     }
5281     if (gen_trap_ifnofpu(dc)) {
5282         return true;
5283     }
5284 
5285     src1 = gen_load_fpr_D(dc, a->rs1);
5286     src2 = gen_load_fpr_D(dc, a->rs2);
5287     if (e) {
5288         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5289     } else {
5290         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5291     }
5292     return advance_pc(dc);
5293 }
5294 
5295 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5296 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5297 
5298 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5299 {
5300     TCGv_i128 src1, src2;
5301 
5302     if (avail_32(dc) && a->cc != 0) {
5303         return false;
5304     }
5305     if (gen_trap_ifnofpu(dc)) {
5306         return true;
5307     }
5308     if (gen_trap_float128(dc)) {
5309         return true;
5310     }
5311 
5312     src1 = gen_load_fpr_Q(dc, a->rs1);
5313     src2 = gen_load_fpr_Q(dc, a->rs2);
5314     if (e) {
5315         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5316     } else {
5317         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5318     }
5319     return advance_pc(dc);
5320 }
5321 
5322 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5323 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5324 
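     /*
      * FLCMP (VIS3) is a lexicographic compare that writes %fccN directly;
      * the helpers take no env argument because they never raise FP
      * exceptions.
      */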
5325 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5326 {
5327     TCGv_i32 src1, src2;
5328 
5329     if (!avail_VIS3(dc)) {
5330         return false;
5331     }
5332     if (gen_trap_ifnofpu(dc)) {
5333         return true;
5334     }
5335 
5336     src1 = gen_load_fpr_F(dc, a->rs1);
5337     src2 = gen_load_fpr_F(dc, a->rs2);
5338     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5339     return advance_pc(dc);
5340 }
5341 
5342 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5343 {
5344     TCGv_i64 src1, src2;
5345 
5346     if (!avail_VIS3(dc)) {
5347         return false;
5348     }
5349     if (gen_trap_ifnofpu(dc)) {
5350         return true;
5351     }
5352 
5353     src1 = gen_load_fpr_D(dc, a->rs1);
5354     src2 = gen_load_fpr_D(dc, a->rs2);
5355     gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
5356     return advance_pc(dc);
5357 }
5358 
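     /*
      * Translator framework hooks: the functions below plug the SPARC
      * decoder into the generic translator_loop().
      */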
5359 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5360 {
5361     DisasContext *dc = container_of(dcbase, DisasContext, base);
5362     int bound;
5363 
5364     dc->pc = dc->base.pc_first;
5365     dc->npc = (target_ulong)dc->base.tb->cs_base;
5366     dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5367     dc->def = &cpu_env(cs)->def;
5368     dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5369     dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5370 #ifndef CONFIG_USER_ONLY
5371     dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5372 #endif
5373 #ifdef TARGET_SPARC64
5374     dc->fprs_dirty = 0;
5375     dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5376 #ifndef CONFIG_USER_ONLY
5377     dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5378 #endif
5379 #endif
5380     /*
5381      * If we reach a page boundary, stop generation so that the PC of a
5382      * TT_TFAULT (instruction access) exception is always in the right page.
5383      */
5384     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5385     dc->base.max_insns = MIN(dc->base.max_insns, bound);
5386 }
5387 
5388 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5389 {
5390 }
5391 
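     /*
      * The low two bits of a dynamic npc hold the JUMP_PC/DYNAMIC_PC
      * markers.  Canonicalize the value here so that what is recorded by
      * tcg_gen_insn_start() can be decoded again in
      * sparc_restore_state_to_opc() below.
      */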
5392 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5393 {
5394     DisasContext *dc = container_of(dcbase, DisasContext, base);
5395     target_ulong npc = dc->npc;
5396 
5397     if (npc & 3) {
5398         switch (npc) {
5399         case JUMP_PC:
5400             assert(dc->jump_pc[1] == dc->pc + 4);
5401             npc = dc->jump_pc[0] | JUMP_PC;
5402             break;
5403         case DYNAMIC_PC:
5404         case DYNAMIC_PC_LOOKUP:
5405             npc = DYNAMIC_PC;
5406             break;
5407         default:
5408             g_assert_not_reached();
5409         }
5410     }
5411     tcg_gen_insn_start(dc->pc, npc);
5412 }
5413 
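     /*
      * Fetch and decode one 4-byte instruction; anything the decoder
      * rejects raises an illegal-instruction trap.
      */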
5414 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5415 {
5416     DisasContext *dc = container_of(dcbase, DisasContext, base);
5417     unsigned int insn;
5418 
5419     insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
5420     dc->base.pc_next += 4;
5421 
5422     if (!decode(dc, insn)) {
5423         gen_exception(dc, TT_ILL_INSN);
5424     }
5425 
5426     if (dc->base.is_jmp == DISAS_NORETURN) {
5427         return;
5428     }
5429     if (dc->pc != dc->base.pc_next) {
5430         dc->base.is_jmp = DISAS_TOO_MANY;
5431     }
5432 }
5433 
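     /*
      * Finish the TB: chain directly when both PC and NPC are static,
      * otherwise fall back to a TB lookup or a plain exit, then emit the
      * out-of-line code for any exceptions delayed from branch delay slots.
      */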
5434 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5435 {
5436     DisasContext *dc = container_of(dcbase, DisasContext, base);
5437     DisasDelayException *e, *e_next;
5438     bool may_lookup;
5439 
5440     finishing_insn(dc);
5441 
5442     switch (dc->base.is_jmp) {
5443     case DISAS_NEXT:
5444     case DISAS_TOO_MANY:
5445         if (((dc->pc | dc->npc) & 3) == 0) {
5446             /* static PC and NPC: we can use direct chaining */
5447             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5448             break;
5449         }
5450 
5451         may_lookup = true;
5452         if (dc->pc & 3) {
5453             switch (dc->pc) {
5454             case DYNAMIC_PC_LOOKUP:
5455                 break;
5456             case DYNAMIC_PC:
5457                 may_lookup = false;
5458                 break;
5459             default:
5460                 g_assert_not_reached();
5461             }
5462         } else {
5463             tcg_gen_movi_tl(cpu_pc, dc->pc);
5464         }
5465 
5466         if (dc->npc & 3) {
5467             switch (dc->npc) {
5468             case JUMP_PC:
5469                 gen_generic_branch(dc);
5470                 break;
5471             case DYNAMIC_PC:
5472                 may_lookup = false;
5473                 break;
5474             case DYNAMIC_PC_LOOKUP:
5475                 break;
5476             default:
5477                 g_assert_not_reached();
5478             }
5479         } else {
5480             tcg_gen_movi_tl(cpu_npc, dc->npc);
5481         }
5482         if (may_lookup) {
5483             tcg_gen_lookup_and_goto_ptr();
5484         } else {
5485             tcg_gen_exit_tb(NULL, 0);
5486         }
5487         break;
5488 
5489     case DISAS_NORETURN:
5490         break;
5491 
5492     case DISAS_EXIT:
5493         /* Exit TB */
5494         save_state(dc);
5495         tcg_gen_exit_tb(NULL, 0);
5496         break;
5497 
5498     default:
5499         g_assert_not_reached();
5500     }
5501 
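         /*
          * Emit the landing pads queued on dc->delay_excp_list: restore the
          * interrupted PC (and NPC, when it is static) and raise the trap.
          */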
5502     for (e = dc->delay_excp_list; e ; e = e_next) {
5503         gen_set_label(e->lab);
5504 
5505         tcg_gen_movi_tl(cpu_pc, e->pc);
5506         if (e->npc % 4 == 0) {
5507             tcg_gen_movi_tl(cpu_npc, e->npc);
5508         }
5509         gen_helper_raise_exception(tcg_env, e->excp);
5510 
5511         e_next = e->next;
5512         g_free(e);
5513     }
5514 }
5515 
5516 static const TranslatorOps sparc_tr_ops = {
5517     .init_disas_context = sparc_tr_init_disas_context,
5518     .tb_start           = sparc_tr_tb_start,
5519     .insn_start         = sparc_tr_insn_start,
5520     .translate_insn     = sparc_tr_translate_insn,
5521     .tb_stop            = sparc_tr_tb_stop,
5522 };
5523 
5524 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5525                            vaddr pc, void *host_pc)
5526 {
5527     DisasContext dc = {};
5528 
5529     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5530 }
5531 
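     /*
      * Register the TCG globals that mirror CPUSPARCState fields.  %g0 is
      * hardwired to zero, the other global registers live at fixed offsets
      * in env, and the windowed registers are reached indirectly through
      * regwptr so that a window change only has to update one pointer.
      */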
5532 void sparc_tcg_init(void)
5533 {
5534     static const char gregnames[32][4] = {
5535         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5536         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5537         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5538         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5539     };
5540 
5541     static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5542 #ifdef TARGET_SPARC64
5543         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5544         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
5545         { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
5546         { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
5547         { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
5548 #else
5549         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
5550 #endif
5551     };
5552 
5553     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5554 #ifdef TARGET_SPARC64
5555         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5556         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5557         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5558 #endif
5559         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5560         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5561         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5562         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5563         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5564         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5565         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5566         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5567         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5568     };
5569 
5570     unsigned int i;
5571 
5572     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5573                                          offsetof(CPUSPARCState, regwptr),
5574                                          "regwptr");
5575 
5576     for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5577         *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5578     }
5579 
5580     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5581         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5582     }
5583 
5584     cpu_regs[0] = NULL;
5585     for (i = 1; i < 8; ++i) {
5586         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5587                                          offsetof(CPUSPARCState, gregs[i]),
5588                                          gregnames[i]);
5589     }
5590 
5591     for (i = 8; i < 32; ++i) {
5592         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5593                                          (i - 8) * sizeof(target_ulong),
5594                                          gregnames[i]);
5595     }
5596 }
5597 
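     /*
      * Rebuild the PC/NPC pair recorded by sparc_tr_insn_start().  For a
      * conditional branch (JUMP_PC) the taken target sits in the upper bits
      * of npc and env->cond selects between it and the fall-through path.
      */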
5598 void sparc_restore_state_to_opc(CPUState *cs,
5599                                 const TranslationBlock *tb,
5600                                 const uint64_t *data)
5601 {
5602     CPUSPARCState *env = cpu_env(cs);
5603     target_ulong pc = data[0];
5604     target_ulong npc = data[1];
5605 
5606     env->pc = pc;
5607     if (npc == DYNAMIC_PC) {
5608         /* dynamic NPC: already stored */
5609     } else if (npc & JUMP_PC) {
5610         /* jump PC: use 'cond' and the jump targets of the translation */
5611         if (env->cond) {
5612             env->npc = npc & ~3;
5613         } else {
5614             env->npc = pc + 4;
5615         }
5616     } else {
5617         env->npc = npc;
5618     }
5619 }
5620